├── .codespell.ignore.files ├── .codespell.ignore.words ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ └── bug_report.md ├── hana-netweaver-tf-only.tfvars └── workflows │ └── ci.yml ├── .gitignore ├── .salt-lint ├── .shellcheckrc ├── .yamllint.yaml ├── LICENSE ├── Makefile ├── README.md ├── aws ├── .terraform.lock.hcl ├── README.md ├── create_remote_state │ ├── README.md │ ├── dynamodb.tf │ ├── main.tf │ └── vars.tf ├── images │ └── policies.png ├── infrastructure.tf ├── json-examples │ ├── container-snapshot.json │ ├── container.json │ ├── devicemap.json │ ├── role-policy.json │ └── trust-policy.json ├── main.tf ├── modules │ ├── bastion │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── drbd_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── get_os_image │ │ ├── main.tf │ │ ├── output.tf │ │ └── variables.tf │ ├── hana_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── iscsi_server │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── majority_maker_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── monitoring │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── netweaver_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ └── sap_cluster_policies │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── templates │ │ ├── aws_data_provider_policy.tpl │ │ ├── aws_ip_agent_policy.tpl │ │ └── aws_stonith_policy.tpl │ │ └── variables.tf ├── outputs.tf ├── remote-state.sample ├── terraform.tfvars.example ├── variables.tf └── version.tf ├── azure ├── .terraform.lock.hcl ├── README.md ├── infrastructure.tf ├── main.tf ├── modules │ ├── bastion │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── drbd_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── hana_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── iscsi_server │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── majority_maker_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── monitoring │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── netweaver_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ └── os_image_reference │ │ ├── outputs.tf │ │ └── variables.tf ├── outputs.tf ├── terraform.tfvars.example ├── variables.tf └── version.tf ├── doc ├── drbd.md ├── fencing.md ├── highlevel_description_aws.png ├── highlevel_description_azure.png ├── highlevel_description_gcp.png ├── highlevel_description_libvirt.png ├── highlevel_description_openstack.png ├── monitoring.md ├── netweaver.md ├── project-architecture.png ├── project-components.png ├── sap-workload-automation-suse-flow.png ├── sap_passwords.md ├── sap_software.md ├── saptune.md ├── troubleshooting.md └── workspaces-workflow.md ├── gcp ├── .terraform.lock.hcl ├── README.md ├── create_remote_state │ ├── README.md │ └── bucket.tf ├── infrastructure.tf ├── main.tf ├── modules │ ├── bastion │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── drbd_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── hana_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── 
salt_provisioner.tf │ │ └── variables.tf │ ├── iscsi_server │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── load_balancer │ │ ├── README.md │ │ ├── main.tf │ │ └── variables.tf │ ├── majority_maker_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── monitoring │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ └── netweaver_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf ├── outputs.tf ├── remote-state.sample ├── terraform.tfvars.example ├── variables.tf └── version.tf ├── generic_modules ├── common_variables │ ├── drbd_variables.tf │ ├── hana_variables.tf │ ├── monitoring_variables.tf │ ├── netweaver_variables.tf │ ├── outputs.tf │ └── variables.tf ├── local_exec │ ├── main.tf │ └── variables.tf ├── on_destroy │ ├── README.md │ ├── main.tf │ ├── on_destroy.sh │ └── variables.tf └── salt_provisioner │ ├── README.md │ ├── main.tf │ └── variables.tf ├── libvirt ├── .terraform.lock.hcl ├── README.md ├── cloud-config.tpl ├── infrastructure.tf ├── main.tf ├── modules │ ├── drbd_node │ │ ├── main.tf │ │ ├── salt_provisioner.tf │ │ ├── shareable.xsl │ │ └── variables.tf │ ├── hana_node │ │ ├── main.tf │ │ ├── salt_provisioner.tf │ │ ├── shareable.xsl │ │ └── variables.tf │ ├── iscsi_server │ │ ├── main.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── majority_maker_node │ │ ├── main.tf │ │ ├── salt_provisioner.tf │ │ ├── shareable.xsl │ │ └── variables.tf │ ├── monitoring │ │ ├── main.tf │ │ ├── salt_provisioner.tf │ │ └── variables.tf │ ├── netweaver_node │ │ ├── main.tf │ │ ├── salt_provisioner.tf │ │ ├── shareable.xls │ │ └── variables.tf │ └── shared_disk │ │ ├── main.tf │ │ ├── raw.xsl │ │ └── variables.tf ├── outputs.tf ├── terraform.tfvars.example ├── variables.tf └── version.tf ├── openstack ├── .terraform.lock.hcl ├── README.md ├── cloud-config.tpl ├── create_remote_state │ └── README.md ├── infrastructure.tf ├── main.tf ├── modules │ ├── bastion │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── drbd_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── hana_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── iscsi_server │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── majority_maker_node │ │ ├── main.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── monitoring │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── netweaver_node │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf │ └── nfs_server │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── salt_provisioner.tf │ │ ├── variables.tf │ │ └── versions.tf ├── outputs.tf ├── remote-state.sample ├── terraform.tfvars.example ├── variables.tf └── versions.tf ├── pillar ├── drbd │ └── .keep ├── hana │ └── .keep ├── iscsi_srv.sls ├── netweaver │ └── .keep └── top.sls ├── pillar_examples ├── README.md ├── automatic │ ├── drbd │ │ ├── cluster.sls │ │ └── drbd.sls │ ├── hana │ │ ├── cluster.sls │ │ └── hana.sls │ └── netweaver │ │ ├── cluster.sls │ │ └── netweaver.sls ├── aws │ ├── cluster.sls │ ├── hana.sls │ └── top.sls ├── azure │ ├── cluster.sls │ ├── hana.sls │ └── top.sls ├── libvirt │ ├── cost_optimized │ 
│ ├── cluster.sls │ │ ├── hana.sls │ │ └── top.sls │ └── performance_optimized │ │ ├── cluster.sls │ │ ├── hana.sls │ │ └── top.sls └── openstack │ ├── cluster.sls │ ├── hana.sls │ └── top.sls ├── requirements.yml └── salt ├── bastion ├── init.sls ├── nginx.sls ├── sapinst.sls └── templates │ └── nginx.conf.j2 ├── cluster_node ├── aws_add_credentials.sls ├── aws_data_provider.sls ├── ha │ ├── init.sls │ ├── iscsi_initiator.sls │ ├── network.sls │ ├── packages.sls │ ├── sbd.sls │ └── ssh.sls ├── hosts.sls ├── init.sls ├── monitoring.sls └── templates │ ├── aws_credentials_template.j2 │ └── promtail.yaml.j2 ├── default ├── hostname.sls ├── init.sls ├── pkgs.sls └── timezone.sls ├── drbd_node ├── custom_handlers.sls ├── drbd_packages.sls ├── init.sls ├── nfs.sls └── parted.sls ├── hana_node ├── download_hana_inst.sls ├── hana_inst_media.sls ├── hana_packages.sls ├── init.sls ├── mount │ ├── init.sls │ ├── lvm.sls │ ├── mount.sls │ ├── mount_uuid.sls │ └── packages.sls └── wait.sls ├── hwcct ├── files │ └── hwcct │ │ ├── hwcct_bench.jinja │ │ └── hwcct_config.json.jinja └── init.sls ├── iscsi_srv ├── init.sls └── parted.sls ├── macros └── download_from_google_storage.sls ├── majority_maker_node └── init.sls ├── monitoring_srv ├── grafana.sls ├── grafana │ └── datasources.yml.j2 ├── init.sls ├── loki.sls ├── prometheus.sls └── prometheus │ ├── prometheus.yml.j2 │ └── rules.yml ├── netweaver_node ├── init.sls ├── installation_files.sls ├── mount │ ├── azure.sls │ ├── init.sls │ └── packages.sls ├── netweaver_packages.sls └── nfs.sls ├── nfs_srv ├── directories.sls ├── init.sls ├── lvm.sls ├── nfs.sls └── packages.sls ├── os_setup ├── auth_keys.sls ├── init.sls ├── ip_workaround.sls ├── minion_configuration.sls ├── packages_install.sls ├── packages_repos.sls ├── packages_update.sls ├── registration.sls └── requirements.sls ├── postdeployment ├── init.sls ├── remove_grains.sls └── remove_salt_logs.sls ├── provider └── azure │ └── nfsv4.sls ├── provision.sh ├── shared_storage └── nfs.sls └── top.sls /.codespell.ignore.files: -------------------------------------------------------------------------------- 1 | venv,.direnv,.git,**/.terraform,**/terraform.**,*.svg 2 | -------------------------------------------------------------------------------- /.codespell.ignore.words: -------------------------------------------------------------------------------- 1 | msdos 2 | aas 3 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text eol=lf 2 | *.png binary 3 | -------------------------------------------------------------------------------- /.github/hana-netweaver-tf-only.tfvars: -------------------------------------------------------------------------------- 1 | # the following 2 vars are acquired via ENV 2 | # qemu_uri = 3 | # source_image = 4 | 5 | hana_inst_master = "10.162.32.134:/sapdata/sap_inst_media/51053787" 6 | iprange = "192.168.25.0/24" 7 | 8 | storage_pool = "terraform" 9 | 10 | # Enable pre deployment to automatically copy the pillar files and create cluster ssh keys 11 | pre_deployment = true 12 | 13 | # For iscsi, it will deploy a new machine hosting an iscsi service 14 | sbd_storage_type = "iscsi" 15 | ha_sap_deployment_repo = "https://download.opensuse.org/repositories/network:/ha-clustering:/sap-deployments:/devel" 16 | 17 | monitoring_enabled = true 18 | 19 | # don't use salt for this test 20 | provisioner = "" 21 | 22 | # Netweaver variables 23 | 24 | # 
Enable/disable Netweaver deployment 25 | netweaver_enabled = true 26 | 27 | # NFS share with netweaver installation folders 28 | netweaver_inst_media = "10.162.32.134:/sapdata/sap_inst_media" 29 | netweaver_swpm_folder = "SWPM_10_SP26_6" 30 | 31 | # Install NetWeaver 32 | netweaver_sapexe_folder = "kernel_nw75_sar" 33 | netweaver_additional_dvds = ["51050829_3", "51053787"] 34 | 35 | 36 | # DRBD variables 37 | 38 | # Enable the DRBD cluster for nfs 39 | drbd_enabled = true 40 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/.terraform 2 | **/terraform.tfstate* 3 | **/.terraform.tfstate* 4 | **/terraform*.tfvars 5 | azure/terraform/provision/node0_id_rsa 6 | azure/terraform/provision/node0_id_rsa.pub 7 | azure/terraform/provision/node1_id_rsa 8 | azure/terraform/provision/node1_id_rsa.pub 9 | gcp/*.json 10 | **/id_rsa* 11 | 12 | salt/sshkeys 13 | pillar/hana/* 14 | !pillar/hana/.keep 15 | pillar/drbd/* 16 | !pillar/drbd/.keep 17 | pillar/netweaver/* 18 | !pillar/netweaver/.keep 19 | 20 | 21 | # Dev specific 22 | **/*.swp 23 | **/*.swo 24 | **/*~ 25 | shell.nix 26 | venv 27 | **/.envrc 28 | **/.direnv 29 | .vscode -------------------------------------------------------------------------------- /.salt-lint: -------------------------------------------------------------------------------- 1 | --- 2 | rules: 3 | 204: 4 | ignore: 'salt/shared_storage/nfs.sls' 5 | -------------------------------------------------------------------------------- /.shellcheckrc: -------------------------------------------------------------------------------- 1 | disable=SC1091 2 | -------------------------------------------------------------------------------- /.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | extends: default 3 | 4 | ignore: | 5 | venv 6 | 7 | rules: 8 | # 80 chars should be enough, but don't fail if a line is longer 9 | line-length: 10 | max: 160 11 | level: warning 12 | -------------------------------------------------------------------------------- /aws/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.0.0" 6 | constraints = "~> 4.0.0" 7 | hashes = [ 8 | "h1:G0toIzBkhRr/UNRdksvnIyPXnGT9nH0A7gWmu93I1Eg=", 9 | "zh:02937cb37860b022e7d996726e7584ca23904baf7852d266f2dd7891ee088ae4", 10 | "zh:259dd5790ec5f4e6814c9584c79834dce3d719e932ce662b21f13434e9441194", 11 | "zh:2d230c8c92c3cb2c07471a4324d802c44365dcf99fe0d562cc737d1f964e9c1d", 12 | "zh:380b04e78934519469e699c537516ae1674d15f77c6778c2738cd69374b661aa", 13 | "zh:3d7121da1fa92166c9ea26f3c9839cef06833420d6c46978b4cbbfd0b5050791", 14 | "zh:6b7f5a3b28ec3a631d689f599a39bfe98ca5b785353b01e374cff655b097a791", 15 | "zh:7882291716d2d03df5ece721429770452db76c712fcff08964c3a7c0b639f703", 16 | "zh:95250c5768610d69a28501f03176b6a05a5d5ac2ae317cb582d94b044b3272b3", 17 | "zh:b16a622a76bee455c8b256d828f8a60515e1e9dad38420a4db1be9b9e16d474a", 18 | "zh:c805822f0ba57e8063b6201e1f351aa4dbd5ad8886dedd25d809e5aeb9aa0259", 19 | "zh:e1c3a0da5576aec4a48f897cd04b739c1f533cdb0005ce4c7f5bc45808b799b1", 20 | ] 21 | } 22 | 23 | provider "registry.terraform.io/hashicorp/null" { 24 | version = "3.1.0" 25 | constraints = "~> 3.1.0" 26 | hashes = [ 27 | "h1:vpC6bgUQoJ0znqIKVFevOdq+YQw42bRq0u+H3nto8nA=", 28 | "zh:02a1675fd8de126a00460942aaae242e65ca3380b5bb192e8773ef3da9073fd2", 29 | "zh:53e30545ff8926a8e30ad30648991ca8b93b6fa496272cd23b26763c8ee84515", 30 | "zh:5f9200bf708913621d0f6514179d89700e9aa3097c77dac730e8ba6e5901d521", 31 | "zh:9ebf4d9704faba06b3ec7242c773c0fbfe12d62db7d00356d4f55385fc69bfb2", 32 | "zh:a6576c81adc70326e4e1c999c04ad9ca37113a6e925aefab4765e5a5198efa7e", 33 | "zh:a8a42d13346347aff6c63a37cda9b2c6aa5cc384a55b2fe6d6adfa390e609c53", 34 | "zh:c797744d08a5307d50210e0454f91ca4d1c7621c68740441cf4579390452321d", 35 | "zh:cecb6a304046df34c11229f20a80b24b1603960b794d68361a67c5efe58e62b8", 36 | "zh:e1371aa1e502000d9974cfaff5be4cfa02f47b17400005a16f14d2ef30dc2a70", 37 | "zh:fc39cc1fe71234a0b0369d5c5c7f876c71b956d23d7d6f518289737a001ba69b", 38 | "zh:fea4227271ebf7d9e2b61b89ce2328c7262acd9fd190e1fd6d15a591abfa848e", 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /aws/create_remote_state/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Overview 3 | 4 | ### Store State Remotely in S3 5 | 6 | If you are working on a team, then its best to store the Terraform state file remotely so that many people can access it. In order to setup terraform to store state remotely you need two things: a S3 bucket to store the state file in and a Terraform S3 backend resource. 7 | 8 | ### What is locking and why do we need it? 9 | 10 | If the state file is stored remotely so that many people can access it, then you risk multiple people attempting to make changes to the same file at the exact same time. So we need to provide a mechanism that will “lock” the state if its currently in-use by another user. We can accomplish this by creating a dynamoDB table for terraform to use. 11 | 12 | ### Show me the code 13 | 14 | The Terraform configuration on this directory creates the S3 bucket and DynamoDB table for storing and locking the Terraform state file remotely. This is known as the [S3 backend🔗](https://www.terraform.io/docs/backends/types/s3.html). 15 | 16 | The S3 bucket is created in a particular AWS region. The name of the S3 must be globally unique. 
You can check its availability by checking this URL: 17 | 18 | `https://<bucket_name>.s3.amazonaws.com/` 19 | 20 | It should output an XML document with this content: 21 | ``` 22 | <Code>NoSuchBucket</Code> 23 | <Message>The specified bucket does not exist</Message> 24 | ``` 25 | 26 | ## Procedure to create the S3 backend: 27 | 28 | 1. Edit the [vars.tf](vars.tf) file to specify the region and bucket name. 29 | 2. Optionally edit the `dynamodb_name` variable in the [vars.tf](vars.tf) file. 30 | 3. Run `terraform init` 31 | 4. Run `terraform plan` to check whether the following command will succeed: 32 | 5. Rename the [remote-state.sample](../remote-state.sample) file to remote-state.tf inside your project. Make sure that the values for the `bucket`, `dynamodb_table` and `region` are the same as those used in the [vars.tf](vars.tf) file. 33 | 6. In your project directory, run the command `terraform init` to reset the state file. 34 | 7. Run `terraform plan` to check whether the following command will succeed: 35 | 8. Run `terraform apply` 36 | 9. Check whether you can run `terraform destroy` from another directory or machine. 37 | 38 | ## Resources 39 | - https://www.terraform.io/docs/backends/types/s3.html 40 | - https://medium.com/@jessgreb01/how-to-terraform-locking-state-in-s3-2dc9a5665cb6 41 | 42 | -------------------------------------------------------------------------------- /aws/create_remote_state/dynamodb.tf: -------------------------------------------------------------------------------- 1 | # create a dynamodb table for locking the state file 2 | resource "aws_dynamodb_table" "dynamodb-terraform-state-lock" { 3 | name = var.dynamodb_name 4 | hash_key = "LockID" 5 | read_capacity = 20 6 | write_capacity = 20 7 | 8 | attribute { 9 | name = "LockID" 10 | type = "S" 11 | } 12 | 13 | tags = { 14 | Name = "DynamoDB Terraform State Lock Table" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /aws/create_remote_state/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = var.aws_region 3 | } 4 | 5 | resource "aws_s3_bucket" "terraform_state" { 6 | bucket = var.bucket_name 7 | 8 | versioning { 9 | enabled = true 10 | } 11 | 12 | lifecycle { 13 | prevent_destroy = true 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /aws/create_remote_state/vars.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | type = string 3 | default = "eu-central-1" 4 | } 5 | 6 | variable "bucket_name" { 7 | description = "The name of the S3 bucket. Must be globally unique." 8 | default = "my-terraform-state" 9 | } 10 | 11 | variable "dynamodb_name" { 12 | description = "The name of the DynamoDB table."
13 | default = "terraform-state-lock-dynamo" 14 | } 15 | -------------------------------------------------------------------------------- /aws/images/policies.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/aws/images/policies.png -------------------------------------------------------------------------------- /aws/json-examples/container-snapshot.json: -------------------------------------------------------------------------------- 1 | { 2 | "Description": "SLES4SAP 12-SP4 Beta4 Build 1.1", 3 | "Format": "raw", 4 | "UserBucket": { 5 | "S3Bucket": "instmasters", 6 | "S3Key": "SLES12-SP4-SAP-EC2-HVM-BYOS.x86_64-0.9.2-Build1.1.raw" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /aws/json-examples/container.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "Description": "SLES4SAP 12-SP4 Beta4 Build 1.1", 4 | "Format": "raw", 5 | "UserBucket": { 6 | "S3Bucket": "instmasters", 7 | "S3Key": "SLES12-SP4-SAP-EC2-HVM-BYOS.x86_64-0.9.2-Build1.1.raw" 8 | } 9 | } 10 | ] 11 | -------------------------------------------------------------------------------- /aws/json-examples/devicemap.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "DeviceName": "/dev/sda1", 4 | "VirtualName": "/dev/sda1", 5 | "Ebs": { 6 | "Encrypted": false, 7 | "DeleteOnTermination": true, 8 | "Iops": 100, 9 | "SnapshotId": "snap-0a369f803b17037bb", 10 | "VolumeSize": 40, 11 | "VolumeType": "gp2" 12 | }, 13 | "NoDevice": "/dev/sda1" 14 | } 15 | ] 16 | -------------------------------------------------------------------------------- /aws/json-examples/role-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version":"2012-10-17", 3 | "Statement":[ 4 | { 5 | "Effect":"Allow", 6 | "Action":[ 7 | "s3:GetBucketLocation", 8 | "s3:ListBucket" 9 | ], 10 | "Resource":[ 11 | "arn:aws:s3:::instmasters" 12 | ] 13 | }, 14 | { 15 | "Effect":"Allow", 16 | "Action":[ 17 | "s3:GetObject" 18 | ], 19 | "Resource":[ 20 | "arn:aws:s3:::instmasters/*" 21 | ] 22 | }, 23 | { 24 | "Effect":"Allow", 25 | "Action":[ 26 | "ec2:ModifySnapshotAttribute", 27 | "ec2:CopySnapshot", 28 | "ec2:RegisterImage", 29 | "ec2:Describe*" 30 | ], 31 | "Resource":"*" 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /aws/json-examples/trust-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { "Service": "vmie.amazonaws.com" }, 7 | "Action": "sts:AssumeRole", 8 | "Condition": { 9 | "StringEquals":{ 10 | "sts:Externalid": "vmimport" 11 | } 12 | } 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /aws/modules/bastion/main.tf: -------------------------------------------------------------------------------- 1 | # bastion server resources 2 | 3 | locals { 4 | provisioning_addresses = aws_instance.bastion.*.public_ip 5 | hostname = var.common_variables["deployment_name_in_hostname"] ? 
format("%s-%s", var.common_variables["deployment_name"], var.name) : var.name 6 | } 7 | 8 | # AWS key pair 9 | resource "aws_key_pair" "key-pair" { 10 | count = var.bastion_count 11 | key_name = "${var.common_variables["deployment_name"]} - terraform-bastion" 12 | public_key = var.common_variables["bastion_public_key"] 13 | } 14 | 15 | module "get_os_image" { 16 | source = "../../modules/get_os_image" 17 | os_image = var.os_image 18 | os_owner = var.os_owner 19 | } 20 | 21 | resource "aws_instance" "bastion" { 22 | count = var.bastion_count 23 | ami = module.get_os_image.image_id 24 | instance_type = var.instance_type 25 | key_name = aws_key_pair.key-pair.0.key_name 26 | associate_public_ip_address = true 27 | subnet_id = element(var.subnet_ids, count.index) 28 | private_ip = element(var.host_ips, count.index) 29 | vpc_security_group_ids = [var.security_group_id] 30 | availability_zone = element(var.availability_zones, count.index) 31 | 32 | root_block_device { 33 | volume_type = "gp2" 34 | volume_size = "20" 35 | } 36 | 37 | volume_tags = { 38 | Name = "${var.common_variables["deployment_name"]}-${var.name}${format("%02d", count.index + 1)}" 39 | } 40 | 41 | tags = { 42 | Name = "${var.common_variables["deployment_name"]}-${var.name}" 43 | Workspace = var.common_variables["deployment_name"] 44 | } 45 | } 46 | 47 | module "bastion_on_destroy" { 48 | source = "../../../generic_modules/on_destroy" 49 | node_count = var.bastion_count 50 | instance_ids = aws_instance.bastion.*.id 51 | user = var.common_variables["authorized_user"] 52 | private_key = var.common_variables["bastion_private_key"] 53 | public_ips = local.provisioning_addresses 54 | dependencies = var.on_destroy_dependencies 55 | } 56 | -------------------------------------------------------------------------------- /aws/modules/bastion/outputs.tf: -------------------------------------------------------------------------------- 1 | data "aws_instance" "bastion" { 2 | count = var.bastion_count 3 | instance_id = element(aws_instance.bastion.*.id, count.index) 4 | } 5 | 6 | output "bastion_ip" { 7 | value = join("", data.aws_instance.bastion.*.private_ip) 8 | } 9 | 10 | output "bastion_public_ip" { 11 | value = join("", data.aws_instance.bastion.*.public_ip) 12 | } 13 | 14 | output "bastion_name" { 15 | value = join("", data.aws_instance.bastion.*.tags.Name) 16 | } 17 | 18 | output "bastion_id" { 19 | value = join("", data.aws_instance.bastion.*.id) 20 | } 21 | 22 | output "bastion_public_name" { 23 | value = join("", data.aws_instance.bastion.*.public_dns) 24 | } 25 | 26 | -------------------------------------------------------------------------------- /aws/modules/bastion/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "bastion_provisioner" { 2 | count = var.common_variables["provisioner"] == "salt" ? 
var.bastion_count : 0 3 | 4 | triggers = { 5 | bastion_id = join(",", aws_instance.bastion.*.id) 6 | } 7 | 8 | connection { 9 | host = element(local.provisioning_addresses, count.index) 10 | type = "ssh" 11 | user = var.common_variables["authorized_user"] 12 | private_key = var.common_variables["bastion_private_key"] 13 | } 14 | 15 | provisioner "file" { 16 | content = < { 32 | start : format("%.0f%%", index * 100 / var.lun_count), 33 | end : format("%.0f%%", (index + 1) * 100 / var.lun_count) 34 | } 35 | } } 36 | )} 37 | 38 | EOF 39 | destination = "/tmp/grains" 40 | } 41 | } 42 | 43 | module "iscsi_provision" { 44 | source = "../../../generic_modules/salt_provisioner" 45 | node_count = var.common_variables["provisioner"] == "salt" ? var.iscsi_count : 0 46 | instance_ids = null_resource.iscsi_provisioner.*.id 47 | user = var.common_variables["authorized_user"] 48 | private_key = var.common_variables["private_key"] 49 | public_ips = local.provisioning_addresses 50 | bastion_host = var.bastion_host 51 | bastion_private_key = var.common_variables["bastion_private_key"] 52 | background = var.common_variables["background"] 53 | } 54 | -------------------------------------------------------------------------------- /aws/modules/majority_maker_node/outputs.tf: -------------------------------------------------------------------------------- 1 | data "aws_instance" "majority_maker" { 2 | count = var.node_count 3 | instance_id = element(aws_instance.majority_maker.*.id, count.index) 4 | } 5 | 6 | output "majority_maker_ip" { 7 | value = data.aws_instance.majority_maker.*.private_ip 8 | } 9 | 10 | output "hana_majority_maker_public_ip" { 11 | value = data.aws_instance.majority_maker.*.public_ip 12 | } 13 | 14 | output "hana_majority_maker_name" { 15 | value = data.aws_instance.majority_maker.*.tags.Name 16 | } 17 | 18 | output "hana_majority_maker_id" { 19 | value = data.aws_instance.majority_maker.*.id 20 | } 21 | 22 | output "hana_majority_maker_public_name" { 23 | value = data.aws_instance.majority_maker.*.public_dns 24 | } 25 | -------------------------------------------------------------------------------- /aws/modules/monitoring/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | bastion_enabled = var.common_variables["bastion_enabled"] 3 | provisioning_addresses = local.bastion_enabled ? aws_instance.monitoring.*.private_ip : aws_instance.monitoring.*.public_ip 4 | hostname = var.common_variables["deployment_name_in_hostname"] ? format("%s-%s", var.common_variables["deployment_name"], var.name) : var.name 5 | } 6 | 7 | module "get_os_image" { 8 | source = "../../modules/get_os_image" 9 | os_image = var.os_image 10 | os_owner = var.os_owner 11 | } 12 | 13 | resource "aws_instance" "monitoring" { 14 | count = var.monitoring_enabled == true ? 1 : 0 15 | ami = module.get_os_image.image_id 16 | instance_type = var.instance_type 17 | key_name = var.key_name 18 | associate_public_ip_address = local.bastion_enabled ? 
false : true 19 | subnet_id = element(var.subnet_ids, 0) 20 | private_ip = var.monitoring_srv_ip 21 | vpc_security_group_ids = [var.security_group_id] 22 | availability_zone = element(var.availability_zones, 0) 23 | 24 | root_block_device { 25 | volume_type = "gp2" 26 | volume_size = "20" 27 | } 28 | 29 | ebs_block_device { 30 | volume_type = "gp2" 31 | volume_size = "10" 32 | device_name = "/dev/sdb" 33 | } 34 | 35 | volume_tags = { 36 | Name = "${var.common_variables["deployment_name"]}-${var.name}" 37 | } 38 | 39 | tags = { 40 | Name = "${var.common_variables["deployment_name"]}-${var.name}" 41 | Workspace = var.common_variables["deployment_name"] 42 | } 43 | } 44 | 45 | module "monitoring_on_destroy" { 46 | source = "../../../generic_modules/on_destroy" 47 | node_count = var.monitoring_enabled ? 1 : 0 48 | instance_ids = aws_instance.monitoring.*.id 49 | user = var.common_variables["authorized_user"] 50 | private_key = var.common_variables["private_key"] 51 | bastion_host = var.bastion_host 52 | bastion_private_key = var.common_variables["bastion_private_key"] 53 | public_ips = local.provisioning_addresses 54 | dependencies = var.on_destroy_dependencies 55 | } 56 | -------------------------------------------------------------------------------- /aws/modules/monitoring/outputs.tf: -------------------------------------------------------------------------------- 1 | data "aws_instance" "monitoring" { 2 | count = var.monitoring_enabled == true ? 1 : 0 3 | instance_id = aws_instance.monitoring.0.id 4 | } 5 | 6 | output "monitoring_ip" { 7 | value = join("", data.aws_instance.monitoring.*.private_ip) 8 | } 9 | 10 | output "monitoring_public_ip" { 11 | value = join("", data.aws_instance.monitoring.*.public_ip) 12 | } 13 | 14 | output "monitoring_name" { 15 | value = join("", data.aws_instance.monitoring.*.tags.Name) 16 | } 17 | 18 | output "monitoring_id" { 19 | value = join("", data.aws_instance.monitoring.*.id) 20 | } 21 | 22 | output "monitoring_public_name" { 23 | value = join("", data.aws_instance.monitoring.*.public_dns) 24 | } 25 | -------------------------------------------------------------------------------- /aws/modules/monitoring/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "monitoring_provisioner" { 2 | count = var.common_variables["provisioner"] == "salt" && var.monitoring_enabled ? 1 : 0 3 | 4 | triggers = { 5 | monitoring_id = aws_instance.monitoring.0.id 6 | } 7 | 8 | connection { 9 | host = element(local.provisioning_addresses, count.index) 10 | type = "ssh" 11 | user = var.common_variables["authorized_user"] 12 | private_key = var.common_variables["private_key"] 13 | 14 | bastion_host = var.bastion_host 15 | bastion_user = var.common_variables["authorized_user"] 16 | bastion_private_key = var.common_variables["bastion_private_key"] 17 | } 18 | 19 | provisioner "file" { 20 | content = < { 31 | start : format("%.0f%%", index * 100 / var.lun_count), 32 | end : format("%.0f%%", (index + 1) * 100 / var.lun_count) 33 | } 34 | } } 35 | )} 36 | 37 | EOF 38 | destination = "/tmp/grains" 39 | } 40 | } 41 | 42 | module "iscsi_provision" { 43 | source = "../../../generic_modules/salt_provisioner" 44 | node_count = var.common_variables["provisioner"] == "salt" ? 
var.iscsi_count : 0 45 | instance_ids = null_resource.iscsi_provisioner.*.id 46 | user = var.common_variables["authorized_user"] 47 | private_key = var.common_variables["private_key"] 48 | bastion_host = var.bastion_host 49 | bastion_private_key = var.common_variables["bastion_private_key"] 50 | public_ips = local.provisioning_addresses 51 | background = var.common_variables["background"] 52 | } 53 | -------------------------------------------------------------------------------- /azure/modules/iscsi_server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "common_variables" { 2 | description = "Output of the common_variables module" 3 | } 4 | 5 | variable "bastion_host" { 6 | description = "Bastion host address" 7 | type = string 8 | default = "" 9 | } 10 | 11 | variable "az_region" { 12 | type = string 13 | default = "westeurope" 14 | } 15 | 16 | variable "resource_group_name" { 17 | type = string 18 | } 19 | 20 | variable "network_subnet_id" { 21 | type = string 22 | } 23 | 24 | variable "storage_account" { 25 | type = string 26 | } 27 | 28 | variable "os_image" { 29 | description = "sles4sap image used to create this module machines. Composed by 'Publisher:Offer:Sku:Version' syntax. Example: SUSE:sles-sap-15-sp2:gen2:latest" 30 | type = string 31 | } 32 | 33 | variable "iscsi_srv_uri" { 34 | type = string 35 | default = "" 36 | } 37 | 38 | variable "name" { 39 | description = "hostname, without the domain part" 40 | type = string 41 | } 42 | 43 | variable "vm_size" { 44 | type = string 45 | default = "Standard_D2s_v3" 46 | } 47 | 48 | variable "network_domain" { 49 | description = "hostname's network domain" 50 | type = string 51 | } 52 | 53 | variable "iscsi_count" { 54 | description = "Number of iscsi machines to deploy" 55 | type = number 56 | } 57 | 58 | variable "host_ips" { 59 | description = "List of ip addresses to set to the machines" 60 | type = list(string) 61 | } 62 | 63 | variable "iscsi_disk_size" { 64 | description = "Disk size in GB used to create the LUNs and partitions to be served by the ISCSI service" 65 | type = number 66 | default = 10 67 | } 68 | 69 | variable "lun_count" { 70 | description = "Number of LUN (logical units) to serve with the iscsi server. Each LUN can be used as a unique sbd disk" 71 | type = number 72 | default = 3 73 | } 74 | -------------------------------------------------------------------------------- /azure/modules/majority_maker_node/outputs.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_public_ip" "majority_maker" { 2 | count = local.bastion_enabled ? 0 : var.node_count 3 | name = element(azurerm_public_ip.majority_maker.*.name, count.index) 4 | resource_group_name = element(azurerm_virtual_machine.majority_maker.*.resource_group_name, count.index) 5 | # depends_on is included to avoid the issue with `resource_group was not found`. Find an example in: https://github.com/terraform-providers/terraform-provider-azurerm/issues/8476 6 | depends_on = [azurerm_virtual_machine.majority_maker] 7 | } 8 | 9 | data "azurerm_network_interface" "majority_maker" { 10 | count = var.node_count 11 | name = element(azurerm_network_interface.majority_maker.*.name, count.index) 12 | resource_group_name = element(azurerm_virtual_machine.majority_maker.*.resource_group_name, count.index) 13 | # depends_on is included to avoid the issue with `resource_group was not found`. 
Find an example in: https://github.com/terraform-providers/terraform-provider-azurerm/issues/8476 14 | depends_on = [azurerm_virtual_machine.majority_maker] 15 | } 16 | 17 | output "hana_ip" { 18 | value = [data.azurerm_network_interface.majority_maker.*.private_ip_address] 19 | } 20 | 21 | output "hana_public_ip" { 22 | value = [data.azurerm_public_ip.majority_maker.*.ip_address] 23 | } 24 | 25 | output "hana_name" { 26 | value = [azurerm_virtual_machine.majority_maker.*.name] 27 | } 28 | 29 | output "hana_public_name" { 30 | value = [data.azurerm_public_ip.majority_maker.*.fqdn] 31 | } 32 | -------------------------------------------------------------------------------- /azure/modules/monitoring/outputs.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_public_ip" "monitoring" { 2 | count = local.bastion_enabled == false && var.monitoring_enabled == true ? 1 : 0 3 | name = azurerm_public_ip.monitoring.0.name 4 | resource_group_name = azurerm_virtual_machine.monitoring.0.resource_group_name 5 | # depends_on is included to avoid the issue with `resource_group was not found`. Find an example in: https://github.com/terraform-providers/terraform-provider-azurerm/issues/8476 6 | depends_on = [azurerm_virtual_machine.monitoring] 7 | } 8 | 9 | data "azurerm_network_interface" "monitoring" { 10 | count = var.monitoring_enabled == true ? 1 : 0 11 | name = azurerm_network_interface.monitoring.0.name 12 | resource_group_name = azurerm_virtual_machine.monitoring.0.resource_group_name 13 | # depends_on is included to avoid the issue with `resource_group was not found`. Find an example in: https://github.com/terraform-providers/terraform-provider-azurerm/issues/8476 14 | depends_on = [azurerm_virtual_machine.monitoring] 15 | } 16 | 17 | output "monitoring_ip" { 18 | value = join("", data.azurerm_network_interface.monitoring.*.private_ip_address) 19 | } 20 | 21 | output "monitoring_public_ip" { 22 | value = join("", data.azurerm_public_ip.monitoring.*.ip_address) 23 | } 24 | 25 | output "monitoring_name" { 26 | value = join("", azurerm_virtual_machine.monitoring.*.name) 27 | } 28 | 29 | output "monitoring_public_name" { 30 | value = join("", data.azurerm_public_ip.monitoring.*.fqdn) 31 | } 32 | -------------------------------------------------------------------------------- /azure/modules/monitoring/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "monitoring_provisioner" { 2 | count = var.common_variables["provisioner"] == "salt" && var.monitoring_enabled ? 1 : 0 3 | 4 | triggers = { 5 | monitoring_id = azurerm_virtual_machine.monitoring.0.id 6 | } 7 | 8 | connection { 9 | host = element(local.provisioning_addresses, count.index) 10 | type = "ssh" 11 | user = var.common_variables["authorized_user"] 12 | private_key = var.common_variables["private_key"] 13 | 14 | bastion_host = var.bastion_host 15 | bastion_user = var.common_variables["authorized_user"] 16 | bastion_private_key = var.common_variables["bastion_private_key"] 17 | } 18 | 19 | provisioner "file" { 20 | content = </README.md` for details). 8 | 9 | Usually the cloud native fence mechanism is recommended as it simpler and less expensive. 10 | 11 | ## SBD 12 | 13 | SBD (Storage Based Death) uses a shared disk among the nodes to halt the nodes. 
14 | 15 | Find more information in: 16 | - https://wiki.clusterlabs.org/wiki/Using_SBD_with_Pacemaker 17 | 18 | The following options are available to use SBD as the cluster fencing mechanism. 19 | 20 | ### ISCSI server 21 | 22 | Use a shared disk served by an ISCSI server. This is a fairly standard option if the clusters are hosted in the cloud, as shared disks are not commonly available. To use this option, an ISCSI server must be created (or use an already existing one). The project gives the option to create a new virtual machine to host this service. For that, we need to use the following variables: 23 | - Set `sbd_storage_type` to `iscsi` 24 | - Enable at least one cluster that will use SBD by setting the option `*_cluster_fencing_mechanism` to `sbd` 25 | - The ISCSI server has some advanced configuration options. Check the Terraform template examples and the available variables for details. 26 | 27 | ### Shared disk 28 | 29 | Use a shared disk attached to all clustered nodes. **This option is only available for libvirt**. To use this option: 30 | - Set `sbd_storage_type` to `shared-disk` 31 | - Enable at least one cluster that will use SBD by setting the option `*_cluster_fencing_mechanism` to `sbd` 32 | 33 | ## Cloud native fencing 34 | 35 | The cloud native fencing mechanism is based on the capabilities of the cloud providers to halt the virtual machines using their own APIs. This means that there is no need for additional machines or resources, making this option simpler and less expensive. **This option is currently only available for AWS and GCP.** 36 | 37 | To use this option: 38 | - Set `*_cluster_fencing_mechanism` to `native` for the clusters that have to use this mechanism. 39 | -------------------------------------------------------------------------------- /doc/highlevel_description_aws.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/highlevel_description_aws.png -------------------------------------------------------------------------------- /doc/highlevel_description_azure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/highlevel_description_azure.png -------------------------------------------------------------------------------- /doc/highlevel_description_gcp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/highlevel_description_gcp.png -------------------------------------------------------------------------------- /doc/highlevel_description_libvirt.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/highlevel_description_libvirt.png -------------------------------------------------------------------------------- /doc/highlevel_description_openstack.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/highlevel_description_openstack.png -------------------------------------------------------------------------------- /doc/project-architecture.png:
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/project-architecture.png -------------------------------------------------------------------------------- /doc/project-components.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/project-components.png -------------------------------------------------------------------------------- /doc/sap-workload-automation-suse-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/ha-sap-terraform-deployments/d27d45e6a36c4ced8e5134ecb3f9eab13a4fc908/doc/sap-workload-automation-suse-flow.png -------------------------------------------------------------------------------- /doc/sap_passwords.md: -------------------------------------------------------------------------------- 1 | # SAP Product Password Rules 2 | 3 | Every SAP product comes with its own set of password rules. Terraform will check the passwords `hana_master_password` 4 | and `netweaver_master_password` configured in `terraform.tfvars` against the default rules. If another ruleset is favored, 5 | it is advisable to use passwords following the default rules, then deploy the system and change to the new ruleset 6 | from inside the system afterwards. 7 | 8 | The password rule checks can be found in `generic_modules/common_variables/*.tf`. 9 | 10 | 11 | ## SAP HANA 12 | 13 | The password for SAP HANA supports password lengths between 8 and 64 characters. 14 | We enforce 10 to 14 characters to be compatible with the NetWeaver and S/4HANA deployments, though. 15 | It can be composed of lowercase letters (`a-z`), uppercase letters (`A-Z`) and numerical 16 | digits (`0-9`). All other characters are considered special characters. 17 | The default configuration requires passwords to contain at least one uppercase letter, 18 | at least one number, and at least one lowercase letter, with special characters being optional. 19 | 20 | For further configuration options of the password rules see: 21 | 22 | - https://help.sap.com/docs/SAP_HANA_PLATFORM/009e68bc5f3c440cb31823a3ec4bb95b/974e9cb991704d05a256241a7b821971.html?locale=en-US&version=2.0.05 23 | 24 | - https://help.sap.com/docs/SAP_HANA_ONE/102d9916bf77407ea3942fef93a47da8/61662e3032ad4f8dbdb5063a21a7d706.html?locale=en-US 25 | 26 | 27 | ## SAP NetWeaver/SAP S/4HANA 28 | 29 | The password for SAP NetWeaver supports password lengths between 10 and 14 characters. 30 | The password can only consist of digits, letters, and the following (ASCII) special characters: `!"@ $%&/()=?'*+~#-_.,;:{[]}<>`, and space and the grave accent. 31 | The password can consist of any characters including national special characters (such as `ä`, `ç`, `ß` from ISO Latin-1, 8859-1). 32 | However, all characters that aren’t contained in the set above are mapped to the same special character, and the system therefore doesn’t differentiate between them.
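As a purely illustrative sketch (not the project's actual implementation, which lives in `generic_modules/common_variables/*.tf`), a rule set like the one above could be expressed with a Terraform variable validation block:

```
variable "hana_master_password" {
  description = "SAP HANA master password (illustrative rules: 10-14 characters, at least one lowercase letter, one uppercase letter and one digit)"
  type        = string

  validation {
    condition = (
      length(var.hana_master_password) >= 10 &&
      length(var.hana_master_password) <= 14 &&
      can(regex("[a-z]", var.hana_master_password)) &&
      can(regex("[A-Z]", var.hana_master_password)) &&
      can(regex("[0-9]", var.hana_master_password))
    )
    error_message = "The password must be 10 to 14 characters long and contain at least one lowercase letter, one uppercase letter and one digit."
  }
}
```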
33 | 34 | For further configuration options of the password rules see: 35 | 36 | - https://help.sap.com/docs/SAP_NETWEAVER_750/c6e6d078ab99452db94ed7b3b7bbcccf/4ac3f18f8c352470e10000000a42189c.html?locale=en-US 37 | -------------------------------------------------------------------------------- /doc/saptune.md: -------------------------------------------------------------------------------- 1 | # Saptune deployment configuration 2 | 3 | You can tune your HANA or S/4HANA and NetWeaver nodes with saptune during the deployment phase. 4 | 5 | 6 | In order to apply a saptune solution, you need to adapt the pillars 7 | during deployment: 8 | 9 | ``` 10 | saptune_solution: 'HANA' 11 | ``` 12 | 13 | By default the pillars are configured to apply the HANA solution for HANA nodes and 14 | the NETWEAVER solution for NetWeaver nodes. 15 | 16 | For further information refer to the saphanabootstrap-formula or its NetWeaver counterpart. 17 | The code for the module is implemented in the 18 | [SUSE/salt-shaptools repository🔗](https://github.com/SUSE/salt-shaptools). 19 | -------------------------------------------------------------------------------- /doc/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Troubleshooting 2 | 3 | The goal of this guide is to provide some useful entry points for debugging. 4 | Feel free to open an issue with these logs, and/or analyze them accordingly. 5 | 6 | # Debugging 7 | 8 | The `provisioning_log_level` variable can be used to change the logging verbosity/level (`error` by default). Change it to `info` or `debug` to get more hints about what's going on. 9 | Find the log level options for Salt here: https://docs.saltstack.com/en/latest/ref/configuration/logging/index.html 10 | 11 | 12 | # Salt useful logs 13 | 14 | Besides the `terraform` execution output, more logs are stored within the created machines in the following log files. 15 | 16 | - `/var/log/salt-result.log`: summarized result of the Salt execution processes. 17 | - `/var/log/salt-os-setup.log`: initial OS setup, registering the machines to SCC, updating the system, etc. 18 | - `/var/log/salt-predeployment.log`: execution of the Salt states contained in the ha-sap-terraform-deployments repository, which run before the formula states. 19 | - `/var/log/salt-deployment.log`: this is the log file where the salt-formulas execution is logged (the salt-formulas are not part of this GitHub deployments project). 20 | 21 | 22 | # S/4HANA and NetWeaver debugging 23 | 24 | - `/tmp/swpm_unnattended/sapinst.log` is the best first entry point to look at when debugging NetWeaver failures. 25 | 26 | 27 | # Misc 28 | 29 | When opening issues, provide the SLE version, the provider, and the logs (described above). 30 | -------------------------------------------------------------------------------- /gcp/create_remote_state/README.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | ### Store State Remotely 4 | 5 | If you are working on a team, then it's best to store the Terraform state file remotely so that many people can access it. In order to set up Terraform to store state remotely you need two things: a GCS bucket to store the state file in and a Terraform GCS backend resource. 6 | 7 | ### Show me the code 8 | 9 | The Terraform configuration in this directory creates the GCS bucket for storing and locking the Terraform state file remotely. This is known as the [GCS backend🔗](https://www.terraform.io/docs/backends/types/gcs.html).
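As a minimal sketch, the remote-state.tf mentioned in the procedure below could look like the following, assuming the bucket name from [bucket.tf](bucket.tf) (the `prefix` value is just a placeholder):

```
terraform {
  backend "gcs" {
    bucket = "terraform-state"
    prefix = "terraform/state"
  }
}
```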
10 | 11 | The bucket name must be globally unique and conform to certain requirements described in [this document🔗](https://cloud.google.com/storage/docs/naming#requirements). 12 | 13 | ## Procedure to create the GCP backend: 14 | 15 | 1. Edit the [bucket.tf](bucket.tf) file to specify the `location` and bucket `name`. 16 | 2. Run `terraform init` 17 | 3. Run `terraform plan` to check whether the following command will succeed: 18 | 4. Run `terraform apply` 19 | 5. Rename the [remote-state.sample](../remote-state.sample) file to remote-state.tf inside your project. Make sure that the values for the `bucket` are the same. 20 | 6. In your project directory, run the command `terraform init --upgrade` to reset the state file. 21 | 7. Run `terraform plan` to check whether the following command will succeed: 22 | 8. Run `terraform apply` 23 | 9. Check whether you can run `terraform destroy` from another directory or machine. 24 | 25 | ## Resources 26 | - https://www.terraform.io/docs/backends/types/gcs.html 27 | -------------------------------------------------------------------------------- /gcp/create_remote_state/bucket.tf: -------------------------------------------------------------------------------- 1 | resource "google_storage_bucket" "terraform-state" { 2 | # NOTE: The bucket name must be globally unique and conform to certain requirements described in: 3 | # https://cloud.google.com/storage/docs/naming#requirements 4 | name = "terraform-state" 5 | 6 | location = "eu" 7 | project = "my-project" 8 | 9 | versioning { 10 | enabled = true 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /gcp/modules/bastion/outputs.tf: -------------------------------------------------------------------------------- 1 | output "public_ip" { 2 | value = join("", google_compute_instance.bastion.*.network_interface.0.access_config.0.nat_ip) 3 | } 4 | -------------------------------------------------------------------------------- /gcp/modules/bastion/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | node_count = var.common_variables["provisioner"] == "salt" ? local.bastion_count : 0 3 | } 4 | 5 | resource "null_resource" "bastion_provisioner" { 6 | count = local.node_count 7 | 8 | triggers = { 9 | bastion_id = join(",", google_compute_instance.bastion.*.id) 10 | } 11 | 12 | connection { 13 | host = element(google_compute_instance.bastion.*.network_interface.0.access_config.0.nat_ip, count.index) 14 | type = "ssh" 15 | user = var.common_variables["authorized_user"] 16 | private_key = var.common_variables["bastion_private_key"] 17 | } 18 | 19 | provisioner "file" { 20 | content = < { 31 | start : format("%.0f%%", index * 100 / var.lun_count), 32 | end : format("%.0f%%", (index + 1) * 100 / var.lun_count) 33 | } 34 | } } 35 | )} 36 | 37 | EOF 38 | destination = "/tmp/grains" 39 | } 40 | } 41 | 42 | module "iscsi_provision" { 43 | source = "../../../generic_modules/salt_provisioner" 44 | node_count = var.common_variables["provisioner"] == "salt" ? 
var.iscsi_count : 0 45 | instance_ids = null_resource.iscsi_provisioner.*.id 46 | user = var.common_variables["authorized_user"] 47 | private_key = var.common_variables["private_key"] 48 | bastion_host = var.bastion_host 49 | bastion_private_key = var.common_variables["bastion_private_key"] 50 | public_ips = local.provisioning_addresses 51 | background = var.common_variables["background"] 52 | } 53 | -------------------------------------------------------------------------------- /gcp/modules/iscsi_server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "common_variables" { 2 | description = "Output of the common_variables module" 3 | } 4 | 5 | variable "bastion_host" { 6 | description = "Bastion host address" 7 | type = string 8 | default = "" 9 | } 10 | 11 | variable "name" { 12 | description = "hostname, without the domain part" 13 | type = string 14 | } 15 | 16 | variable "machine_type" { 17 | type = string 18 | default = "custom-1-2048" 19 | } 20 | 21 | variable "compute_zones" { 22 | description = "gcp compute zones data" 23 | type = list(string) 24 | } 25 | 26 | variable "network_subnet_name" { 27 | description = "Subnet name to attach the network interface of the nodes" 28 | type = string 29 | } 30 | 31 | variable "os_image" { 32 | description = "Image used to create the machine" 33 | type = string 34 | } 35 | 36 | variable "network_domain" { 37 | description = "hostname's network domain" 38 | type = string 39 | } 40 | 41 | variable "iscsi_count" { 42 | type = number 43 | description = "Number of iscsi machines to deploy" 44 | } 45 | 46 | variable "host_ips" { 47 | description = "List of ip addresses to set to the machines" 48 | type = list(string) 49 | } 50 | 51 | variable "iscsi_disk_size" { 52 | description = "Disk size in GB used to create the LUNs and partitions to be served by the ISCSI service" 53 | type = number 54 | default = 10 55 | } 56 | 57 | variable "lun_count" { 58 | description = "Number of LUN (logical units) to serve with the iscsi server. Each LUN can be used as a unique sbd disk" 59 | type = number 60 | default = 3 61 | } 62 | 63 | variable "on_destroy_dependencies" { 64 | description = "Resources objects need in the on_destroy script (everything that allows ssh connection)" 65 | type = any 66 | default = [] 67 | } 68 | -------------------------------------------------------------------------------- /gcp/modules/load_balancer/README.md: -------------------------------------------------------------------------------- 1 | # GCP load balancer 2 | 3 | This module implements a GCP load balancer with the purpose of managing the HA cluster virtual ip address, focused for 2 node clusters. This means that it has two different groups, for the primary and secondary nodes. 
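As a purely hypothetical invocation sketch (the instance group references, health check port and VIP value below are placeholders, not the project's actual wiring), the module could be used like this:

```
module "hana_load_balancer" {
  source                = "../../modules/load_balancer"
  name                  = "hana"
  region                = var.region
  network_name          = var.network_name
  network_subnet_name   = var.network_subnet_name
  primary_node_group    = google_compute_instance_group.hana_primary.self_link   # placeholder
  secondary_node_group  = google_compute_instance_group.hana_secondary.self_link # placeholder
  tcp_health_check_port = 62500                                                  # placeholder
  target_tags           = ["hana-group"]
  ip_address            = "10.0.0.200"                                           # placeholder VIP
}
```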
4 | 5 | Find here the implementation details: 6 | - https://cloud.google.com/solutions/sap/docs/sap-hana-ha-vip-migration-sles 7 | - https://cloud.google.com/solutions/sap/docs/sap-hana-ha-config-sles 8 | -------------------------------------------------------------------------------- /gcp/modules/load_balancer/main.tf: -------------------------------------------------------------------------------- 1 | # GCP load balancer resource 2 | # Based on: https://cloud.google.com/solutions/sap/docs/sap-hana-ha-vip-migration-sles 3 | # And: https://cloud.google.com/solutions/sap/docs/sap-hana-ha-config-sles 4 | 5 | resource "google_compute_health_check" "health-check" { 6 | name = "${var.name}-health-check" 7 | 8 | timeout_sec = 10 9 | check_interval_sec = 10 10 | unhealthy_threshold = 2 11 | healthy_threshold = 2 12 | 13 | tcp_health_check { 14 | port = var.tcp_health_check_port 15 | } 16 | } 17 | 18 | # More information about the load balancer firewall 19 | # https://cloud.google.com/load-balancing/docs/health-checks#fw-rule 20 | resource "google_compute_firewall" "load-balancer-firewall" { 21 | name = "${var.name}-load-balancer-firewall" 22 | network = var.network_name 23 | source_ranges = ["35.191.0.0/16", "130.211.0.0/22"] 24 | target_tags = var.target_tags 25 | 26 | allow { 27 | protocol = "tcp" 28 | ports = [var.tcp_health_check_port] 29 | } 30 | } 31 | 32 | resource "google_compute_region_backend_service" "backend-service" { 33 | name = "${var.name}-backend-service" 34 | region = var.region 35 | load_balancing_scheme = "INTERNAL" 36 | health_checks = [google_compute_health_check.health-check.*.id[0]] 37 | 38 | backend { 39 | group = var.primary_node_group 40 | } 41 | 42 | backend { 43 | group = var.secondary_node_group 44 | failover = true 45 | } 46 | 47 | failover_policy { 48 | disable_connection_drain_on_failover = true 49 | drop_traffic_if_unhealthy = true 50 | failover_ratio = 1 51 | } 52 | } 53 | 54 | resource "google_compute_forwarding_rule" "load-balancer-forwarding-rule" { 55 | name = "${var.name}-load-balancer-forwarding-rule" 56 | region = var.region 57 | load_balancing_scheme = "INTERNAL" 58 | subnetwork = var.network_subnet_name 59 | ip_address = var.ip_address 60 | backend_service = google_compute_region_backend_service.backend-service.id 61 | all_ports = true 62 | } 63 | -------------------------------------------------------------------------------- /gcp/modules/load_balancer/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | description = "Prefix name used to create the load balancer resources" 3 | type = string 4 | } 5 | 6 | variable "region" { 7 | description = "Region where the load balancer is deployed" 8 | type = string 9 | } 10 | 11 | variable "network_name" { 12 | description = "Network where the load balancer resources are attached" 13 | type = string 14 | } 15 | 16 | variable "network_subnet_name" { 17 | description = "Subnetwork which has the load balancer attached" 18 | type = string 19 | } 20 | 21 | variable "primary_node_group" { 22 | description = "Primary node group. The load balancer forwards to this group the traffic by default" 23 | type = string 24 | } 25 | 26 | variable "secondary_node_group" { 27 | description = "Secondary node id. 
The load balancer forwards to this group the traffic as fallback option" 28 | type = string 29 | } 30 | 31 | variable "tcp_health_check_port" { 32 | description = "Port used to check the health of the node" 33 | type = number 34 | } 35 | 36 | variable "target_tags" { 37 | description = "List of tags applied to the virtual machines which are used by the load balancer firewall rule" 38 | type = list(string) 39 | } 40 | 41 | variable "ip_address" { 42 | description = "IP address which the data is forwarded" 43 | type = string 44 | } 45 | -------------------------------------------------------------------------------- /gcp/modules/majority_maker_node/main.tf: -------------------------------------------------------------------------------- 1 | # HANA deployment in GCP 2 | 3 | locals { 4 | bastion_enabled = var.common_variables["bastion_enabled"] 5 | provisioning_addresses = local.bastion_enabled ? google_compute_instance.majority_maker.*.network_interface.0.network_ip : google_compute_instance.majority_maker.*.network_interface.0.access_config.0.nat_ip 6 | hostname = var.common_variables["deployment_name_in_hostname"] ? format("%s-%s", var.common_variables["deployment_name"], var.name) : var.name 7 | } 8 | 9 | resource "google_compute_instance" "majority_maker" { 10 | count = var.node_count 11 | machine_type = var.machine_type 12 | name = "${var.common_variables["deployment_name"]}-${var.name}mm" 13 | zone = element(var.compute_zones, 2) 14 | 15 | can_ip_forward = true 16 | 17 | network_interface { 18 | subnetwork = var.network_subnet_name 19 | network_ip = var.majority_maker_ip 20 | 21 | # Set public IP address. Only if the bastion is not used 22 | dynamic "access_config" { 23 | for_each = local.bastion_enabled ? [] : [1] 24 | content { 25 | nat_ip = "" 26 | } 27 | } 28 | } 29 | 30 | scheduling { 31 | automatic_restart = true 32 | on_host_maintenance = "MIGRATE" 33 | preemptible = false 34 | } 35 | 36 | boot_disk { 37 | initialize_params { 38 | image = var.os_image 39 | size = 60 40 | } 41 | 42 | auto_delete = true 43 | } 44 | 45 | metadata = { 46 | sshKeys = "${var.common_variables["authorized_user"]}:${var.common_variables["public_key"]}" 47 | } 48 | 49 | service_account { 50 | scopes = ["compute-rw", "storage-rw", "logging-write", "monitoring-write", "service-control", "service-management"] 51 | } 52 | 53 | tags = ["hana-group"] 54 | } 55 | 56 | module "hana_on_destroy" { 57 | source = "../../../generic_modules/on_destroy" 58 | node_count = var.node_count 59 | instance_ids = google_compute_instance.majority_maker.*.id 60 | user = var.common_variables["authorized_user"] 61 | private_key = var.common_variables["private_key"] 62 | bastion_host = var.bastion_host 63 | bastion_private_key = var.common_variables["bastion_private_key"] 64 | public_ips = local.provisioning_addresses 65 | dependencies = var.on_destroy_dependencies 66 | } 67 | -------------------------------------------------------------------------------- /gcp/modules/majority_maker_node/outputs.tf: -------------------------------------------------------------------------------- 1 | output "majority_maker_ip" { 2 | value = google_compute_instance.majority_maker.*.network_interface.0.network_ip 3 | } 4 | 5 | output "majority_maker_public_ip" { 6 | value = local.bastion_enabled ? 
[] : google_compute_instance.majority_maker.*.network_interface.0.access_config.0.nat_ip 7 | } 8 | 9 | output "majority_maker_name" { 10 | value = google_compute_instance.majority_maker.*.name 11 | } 12 | 13 | output "majority_maker_public_name" { 14 | value = [] 15 | } 16 | -------------------------------------------------------------------------------- /gcp/modules/monitoring/outputs.tf: -------------------------------------------------------------------------------- 1 | output "monitoring_ip" { 2 | value = join("", google_compute_instance.monitoring.*.network_interface.0.network_ip) 3 | } 4 | 5 | output "monitoring_public_ip" { 6 | value = local.bastion_enabled ? "" : join("", google_compute_instance.monitoring.*.network_interface.0.access_config.0.nat_ip) 7 | } 8 | 9 | output "monitoring_name" { 10 | value = join("", google_compute_instance.monitoring.*.name) 11 | } 12 | 13 | output "monitoring_public_name" { 14 | value = "" 15 | } 16 | -------------------------------------------------------------------------------- /gcp/modules/monitoring/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "monitoring_provisioner" { 2 | count = var.common_variables["provisioner"] == "salt" && var.monitoring_enabled ? 1 : 0 3 | 4 | triggers = { 5 | cluster_instance_id = google_compute_instance.monitoring.0.id 6 | } 7 | 8 | connection { 9 | host = element(local.provisioning_addresses, count.index) 10 | type = "ssh" 11 | user = var.common_variables["authorized_user"] 12 | private_key = var.common_variables["private_key"] 13 | 14 | bastion_host = var.bastion_host 15 | bastion_user = var.common_variables["authorized_user"] 16 | bastion_private_key = var.common_variables["bastion_private_key"] 17 | } 18 | 19 | provisioner "file" { 20 | content = < 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /libvirt/modules/hana_node/shareable.xsl: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /libvirt/modules/iscsi_server/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_after_cloud_init" { 2 | count = var.common_variables["provisioner"] == "salt" ? var.iscsi_count : 0 3 | 4 | triggers = { 5 | iscsi_id = libvirt_domain.iscsisrv[count.index].id 6 | } 7 | 8 | provisioner "remote-exec" { 9 | inline = [ 10 | "if command -v cloud-init; then cloud-init status --wait; else echo no cloud-init installed; fi" 11 | ] 12 | } 13 | 14 | connection { 15 | host = libvirt_domain.iscsisrv[count.index].network_interface.0.addresses.0 16 | user = "root" 17 | password = "linux" 18 | } 19 | } 20 | 21 | resource "null_resource" "iscsi_provisioner" { 22 | count = var.common_variables["provisioner"] == "salt" ? 
var.iscsi_count : 0 23 | 24 | triggers = { 25 | iscsi_id = libvirt_domain.iscsisrv[count.index].id 26 | } 27 | 28 | connection { 29 | host = libvirt_domain.iscsisrv[count.index].network_interface.0.addresses.0 30 | user = "root" 31 | password = "linux" 32 | } 33 | 34 | provisioner "file" { 35 | content = < { 48 | start : format("%.0f%%", index * 100 / var.lun_count), 49 | end : format("%.0f%%", (index + 1) * 100 / var.lun_count) 50 | } 51 | } } 52 | )} 53 | EOF 54 | destination = "/tmp/grains" 55 | } 56 | 57 | depends_on = [null_resource.wait_after_cloud_init] 58 | } 59 | 60 | module "iscsi_provision" { 61 | source = "../../../generic_modules/salt_provisioner" 62 | node_count = var.common_variables["provisioner"] == "salt" ? var.iscsi_count : 0 63 | instance_ids = null_resource.iscsi_provisioner.*.id 64 | user = "root" 65 | password = "linux" 66 | public_ips = libvirt_domain.iscsisrv.*.network_interface.0.addresses.0 67 | background = var.common_variables["background"] 68 | } 69 | -------------------------------------------------------------------------------- /libvirt/modules/majority_maker_node/shareable.xsl: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /libvirt/modules/monitoring/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "wait_after_cloud_init" { 2 | count = var.common_variables["provisioner"] == "salt" && var.monitoring_enabled ? 1 : 0 3 | 4 | triggers = { 5 | monitoring_id = libvirt_domain.monitoring_domain.0.id 6 | } 7 | 8 | provisioner "remote-exec" { 9 | inline = [ 10 | "if command -v cloud-init; then cloud-init status --wait; else echo no cloud-init installed; fi" 11 | ] 12 | } 13 | 14 | depends_on = [libvirt_domain.monitoring_domain.0] 15 | connection { 16 | host = libvirt_domain.monitoring_domain.0.network_interface.0.addresses.0 17 | user = "root" 18 | password = "linux" 19 | } 20 | } 21 | 22 | resource "null_resource" "monitoring_provisioner" { 23 | count = var.common_variables["provisioner"] == "salt" && var.monitoring_enabled ? 
1 : 0 24 | triggers = { 25 | monitoring_id = libvirt_domain.monitoring_domain.0.id 26 | } 27 | 28 | connection { 29 | host = libvirt_domain.monitoring_domain.0.network_interface.0.addresses.0 30 | user = "root" 31 | password = "linux" 32 | } 33 | 34 | provisioner "file" { 35 | content = < 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | -------------------------------------------------------------------------------- /libvirt/modules/shared_disk/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.1.0" 3 | required_providers { 4 | libvirt = { 5 | source = "dmacvicar/libvirt" 6 | version = "0.6.14" 7 | } 8 | } 9 | } 10 | 11 | resource "libvirt_volume" "shared_disk" { 12 | name = "${var.common_variables["deployment_name"]}-${var.name}.raw" 13 | pool = var.pool 14 | size = var.shared_disk_size 15 | count = var.shared_disk_count 16 | 17 | xml { 18 | xslt = file("modules/shared_disk/raw.xsl") 19 | } 20 | } 21 | 22 | output "id" { 23 | value = join(",", libvirt_volume.shared_disk.*.id) 24 | } 25 | -------------------------------------------------------------------------------- /libvirt/modules/shared_disk/raw.xsl: -------------------------------------------------------------------------------- 1 | 2 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /libvirt/modules/shared_disk/variables.tf: -------------------------------------------------------------------------------- 1 | variable "common_variables" { 2 | description = "Output of the common_variables module" 3 | } 4 | 5 | variable "name" { 6 | description = "name of the disk" 7 | type = string 8 | } 9 | 10 | variable "pool" { 11 | description = "libvirt storage pool name for VM disks" 12 | default = "default" 13 | } 14 | 15 | variable "shared_disk_size" { 16 | description = "shared partition disk size" 17 | default = "104857600" # 100MB 18 | } 19 | 20 | variable "shared_disk_count" { 21 | description = "variable used to decide to create or not the shared disk device" 22 | default = 1 23 | } 24 | -------------------------------------------------------------------------------- /libvirt/version.tf: -------------------------------------------------------------------------------- 1 | # using the libvirt prodider requires a terraform block every submodule 2 | # keep in mind also to change every terraform block in modules/*/main.tf 3 | 4 | terraform { 5 | required_version = ">= 1.1.0" 6 | required_providers { 7 | libvirt = { 8 | source = "dmacvicar/libvirt" 9 | version = "0.6.14" 10 | } 11 | } 12 | } 13 | 14 | provider "libvirt" { 15 | uri = var.qemu_uri 16 | } 17 | -------------------------------------------------------------------------------- /openstack/cloud-config.tpl: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | cloud_config_modules: 4 | - runcmd 5 | 6 | cloud_final_modules: 7 | - scripts-user 8 | 9 | runcmd: 10 | - | 11 | # add any command here 12 | -------------------------------------------------------------------------------- /openstack/create_remote_state/README.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | ### Store State Remotely 4 | 5 | If you are working on a team, then its best to store the terraform state file 
remotely so that many people can access it. To set up Terraform to store state remotely you need two things: a Swift-enabled OpenStack and a Terraform OpenStack backend resource. 6 | 7 | ## Procedure to create the OpenStack backend: 8 | 9 | 1. Rename the [remote-state.sample](../remote-state.sample) file to remote-state.tf inside your project. 10 | 2. Run `terraform init` 11 | 3. Run `terraform plan` to check whether the following `terraform apply` will succeed. 12 | 4. Run `terraform apply` 13 | 5. In your project directory, run the command `terraform init --upgrade` to reset the state file. 14 | 6. Run `terraform plan` to check whether the following `terraform apply` will succeed. 15 | 7. Run `terraform apply` 16 | 8. Check whether you can run `terraform destroy` from another directory or machine. 17 | 18 | ## Resources 19 | - https://www.terraform.io/docs/language/settings/backends/swift.html 20 | -------------------------------------------------------------------------------- /openstack/modules/bastion/outputs.tf: -------------------------------------------------------------------------------- 1 | data "openstack_networking_floatingip_v2" "bastion" { 2 | count = local.bastion_count 3 | address = openstack_networking_floatingip_v2.bastion.0.address 4 | depends_on = [openstack_compute_instance_v2.bastion] 5 | } 6 | 7 | data "openstack_compute_instance_v2" "bastion" { 8 | count = local.bastion_count 9 | id = openstack_compute_instance_v2.bastion.0.id 10 | depends_on = [openstack_compute_instance_v2.bastion] 11 | } 12 | 13 | output "public_ip" { 14 | value = join("", data.openstack_networking_floatingip_v2.bastion.*.address) 15 | } 16 | 17 | output "bastion_ip" { 18 | value = data.openstack_compute_instance_v2.bastion.*.access_ip_v4 19 | } 20 | -------------------------------------------------------------------------------- /openstack/modules/bastion/salt_provisioner.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | node_count = var.common_variables["provisioner"] == "salt" ?
local.bastion_count : 0 3 | } 4 | 5 | resource "null_resource" "wait_after_cloud_init" { 6 | provisioner "remote-exec" { 7 | inline = [ 8 | "cloud-init status --wait" 9 | ] 10 | } 11 | depends_on = [openstack_compute_instance_v2.bastion] 12 | connection { 13 | type = "ssh" 14 | user = var.common_variables["authorized_user"] 15 | private_key = var.common_variables["bastion_private_key"] 16 | host = openstack_compute_floatingip_associate_v2.bastion.floating_ip 17 | } 18 | } 19 | 20 | resource "null_resource" "bastion_provisioner" { 21 | count = local.node_count 22 | 23 | triggers = { 24 | bastion_id = join(",", openstack_compute_instance_v2.bastion.*.id) 25 | } 26 | 27 | connection { 28 | # host = element(data.openstack_networking_floatingip_v2.bastion.*.address, count.index) 29 | host = openstack_compute_floatingip_associate_v2.bastion.floating_ip 30 | type = "ssh" 31 | user = var.common_variables["authorized_user"] 32 | private_key = var.common_variables["bastion_private_key"] 33 | } 34 | 35 | provisioner "file" { 36 | content = <2}'.format(loop.index) }}: 3 | host.present: 4 | - ip: {{ ip }} 5 | - names: 6 | - {{ grains['name_prefix'] }}{{ '{:0>2}'.format(loop.index) }} 7 | {% endfor %} 8 | 9 | {% if grains['majority_maker_ip']|default(None) and grains['majority_maker_node']|default(None) %} 10 | {{ grains['majority_maker_node'] }}: 11 | host.present: 12 | - ip: {{ grains['majority_maker_ip'] }} 13 | {% endif %} 14 | -------------------------------------------------------------------------------- /salt/cluster_node/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - cluster_node.hosts 3 | {% if grains.get('ha_enabled', True) %} 4 | - cluster_node.ha 5 | {% endif %} 6 | {% if grains.get('monitoring_enabled') %} 7 | - cluster_node.monitoring 8 | {% endif %} 9 | {%- if grains['provider'] == 'aws' %} 10 | - cluster_node.aws_add_credentials 11 | - cluster_node.aws_data_provider 12 | {%- endif %} 13 | -------------------------------------------------------------------------------- /salt/cluster_node/monitoring.sls: -------------------------------------------------------------------------------- 1 | prometheus_node_exporter: 2 | pkg.installed: 3 | - name: golang-github-prometheus-node_exporter 4 | 5 | node_exporter_service: 6 | service.running: 7 | - name: prometheus-node_exporter 8 | - enable: True 9 | - require: 10 | - pkg: prometheus_node_exporter 11 | - file: activate_node_exporter_systemd_collector 12 | - watch: 13 | - file: activate_node_exporter_systemd_collector 14 | 15 | activate_node_exporter_systemd_collector: 16 | file.managed: 17 | - name: /etc/sysconfig/prometheus-node_exporter 18 | - makedirs: True 19 | - contents: | 20 | ARGS="--collector.systemd --no-collector.mdadm" 21 | 22 | {%- if grains['osmajorrelease'] > 12 %} 23 | promtail: 24 | pkg.installed: 25 | - name: promtail 26 | - retry: 27 | attempts: 3 28 | interval: 15 29 | 30 | promtail_config: 31 | file.managed: 32 | - name: /etc/loki/promtail.yaml 33 | - template: jinja 34 | - source: salt://cluster_node/templates/promtail.yaml.j2 35 | 36 | # we need to add loki's user to the systemd-journal group, to let promtail read /run/log/journal 37 | ## https://build.opensuse.org/request/show/940653 removed the loki user 38 | ## promtail is running as root now and loki's permissions do not need to be adapted for now 39 | # loki_systemd_journal_member: 40 | # group.present: 41 | # - name: systemd-journal 42 | # - addusers: 43 | # - loki 44 | # - require: 45 | # - pkg: promtail 
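# Illustrative note, not part of the original state: once the promtail service below is
# running, shipping of the systemd journal can be checked manually with, for example,
#   curl -s http://localhost:9080/targets   # 9080 matches http_listen_port in templates/promtail.yaml.j2
#   journalctl -u promtail -n 50
# to confirm that entries are being pushed to Loki on the monitoring server (port 3100).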
46 | 47 | promtail_service: 48 | service.running: 49 | - name: promtail 50 | - enable: True 51 | - require: 52 | - pkg: promtail 53 | - file: promtail_config 54 | # - group: loki_systemd_journal_member 55 | {%- endif %} 56 | -------------------------------------------------------------------------------- /salt/cluster_node/templates/aws_credentials_template.j2: -------------------------------------------------------------------------------- 1 | {%- set aws_access_key_id = salt['cmd.run']('sed -En "s/aws_access_key_id[[:blank:]]*=[[:blank:]]*(.*)/\\\\1/p" ~'~grains['username']~'/.aws/config', python_shell=true) %} 2 | {%- set aws_secret_access_key = salt['cmd.run']('sed -En "s/aws_secret_access_key[[:blank:]]*=[[:blank:]]*(.*)/\\\\1/p" ~'~grains['username']~'/.aws/config', python_shell=true) %} 3 | [profile {{ grains['aws_cluster_profile'] }}] 4 | region = {{ grains['region'] }} 5 | output = text 6 | aws_access_key_id = {{ aws_access_key_id }} 7 | aws_secret_access_key = {{ aws_secret_access_key }} 8 | -------------------------------------------------------------------------------- /salt/cluster_node/templates/promtail.yaml.j2: -------------------------------------------------------------------------------- 1 | server: 2 | http_listen_port: 9080 3 | grpc_listen_port: 0 4 | 5 | positions: 6 | filename: /tmp/promtail-positions.yaml 7 | 8 | clients: 9 | - url: http://{{ grains['monitoring_srv_ip'] }}:3100/loki/api/v1/push 10 | 11 | scrape_configs: 12 | - job_name: journal 13 | journal: 14 | labels: 15 | job: systemd-journal 16 | relabel_configs: 17 | - source_labels: 18 | - __journal__systemd_unit 19 | target_label: systemd_unit 20 | - source_labels: 21 | - __journal__hostname 22 | target_label: hostname 23 | - source_labels: 24 | - __journal_syslog_identifier 25 | target_label: syslog_identifier 26 | -------------------------------------------------------------------------------- /salt/default/hostname.sls: -------------------------------------------------------------------------------- 1 | # set the hostname in the kernel, this is needed for Red Hat systems 2 | # and does not hurt in others 3 | kernel_hostname: 4 | cmd.run: 5 | - name: sysctl kernel.hostname={{ grains['hostname'] }} 6 | - unless: sysctl --values kernel.hostname | grep -w {{ grains['hostname'] }} 7 | 8 | # set the hostname in userland. There is no consensus among distros 9 | # but Debian prefers the short name, SUSE demands the short name, 10 | # Red Hat suggests the FQDN but works with the short name. 
11 | # Bottom line: short name is used here 12 | temporary_hostname: 13 | cmd.run: 14 | {% if grains['init'] == 'systemd' %} 15 | - name: hostnamectl set-hostname {{ grains['hostname'] }} 16 | {% else %} 17 | - name: hostname {{ grains['hostname'] }} 18 | {% endif %} 19 | 20 | # set the hostname in the filesystem, matching the temporary hostname 21 | permanent_hostname: 22 | file.managed: 23 | - name: /etc/hostname 24 | - contents: {{ grains['hostname'] }} 25 | 26 | # /etc/HOSTNAME is supposed to always contain the FQDN 27 | legacy_permanent_hostname: 28 | file.managed: 29 | - name: /etc/HOSTNAME 30 | - follow_symlinks: False 31 | - contents: {{ grains['hostname'] }}.{{ grains['network_domain'] }} 32 | -------------------------------------------------------------------------------- /salt/default/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - default.hostname 3 | - default.pkgs 4 | {% if grains['provider'] == 'libvirt' %} 5 | - default.timezone 6 | {% endif %} 7 | -------------------------------------------------------------------------------- /salt/default/pkgs.sls: -------------------------------------------------------------------------------- 1 | {% if grains['additional_packages'] %} 2 | install_additional_packages: 3 | pkg.latest: 4 | - pkgs: 5 | {% for package in grains['additional_packages'] %} 6 | - {{ package }} 7 | {% endfor %} 8 | {% endif %} 9 | -------------------------------------------------------------------------------- /salt/default/timezone.sls: -------------------------------------------------------------------------------- 1 | timezone_package: 2 | pkg.installed: 3 | {% if grains['os_family'] == 'Suse' %} 4 | - name: timezone 5 | {% else %} 6 | - name: tzdata 7 | {% endif %} 8 | - retry: 9 | attempts: 3 10 | interval: 15 11 | 12 | timezone_symlink: 13 | file.symlink: 14 | - name: /etc/localtime 15 | - target: /usr/share/zoneinfo/{{ grains['timezone'] }} 16 | - force: true 17 | - require: 18 | - pkg: timezone_package 19 | 20 | timezone_setting: 21 | timezone.system: 22 | - name: {{ grains['timezone'] }} 23 | - utc: True 24 | - require: 25 | - file: timezone_symlink 26 | -------------------------------------------------------------------------------- /salt/drbd_node/custom_handlers.sls: -------------------------------------------------------------------------------- 1 | # install the custom handler for splitbrain 2 | /usr/lib/drbd/notify-split-brain-haclusterexporter-suse-metric.sh: 3 | file.managed: 4 | - source: salt://drbd/templates/ha_cluster_exporter/notify-split-brain-haclusterexporter-suse-metric.sh 5 | - mode: "0744" 6 | - makedirs: True 7 | - require: 8 | - pkg: drbd-formula 9 | -------------------------------------------------------------------------------- /salt/drbd_node/drbd_packages.sls: -------------------------------------------------------------------------------- 1 | drbd-kmp-default: 2 | pkg.installed: 3 | - retry: 4 | attempts: 3 5 | interval: 15 6 | 7 | drbd-formula: 8 | pkg.installed: 9 | - retry: 10 | attempts: 3 11 | interval: 15 12 | 13 | parted_package: 14 | pkg.installed: 15 | - name: parted 16 | - retry: 17 | attempts: 3 18 | interval: 15 19 | 20 | nfs_packages: 21 | pkg.installed: 22 | - name: nfs-kernel-server 23 | - retry: 24 | attempts: 3 25 | interval: 15 26 | -------------------------------------------------------------------------------- /salt/drbd_node/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - 
drbd_node.drbd_packages 3 | - drbd_node.parted 4 | - drbd_node.custom_handlers 5 | - drbd_node.nfs 6 | -------------------------------------------------------------------------------- /salt/drbd_node/nfs.sls: -------------------------------------------------------------------------------- 1 | create_nfs_folder: 2 | file.directory: 3 | - name: {{ grains['nfs_mounting_point'] }} 4 | - user: root 5 | - mode: "0755" 6 | - makedirs: True 7 | 8 | 9 | configure_nfs: 10 | nfs_export.present: 11 | - name: {{ grains['nfs_mounting_point'] }} 12 | - hosts: '*' 13 | - options: 14 | - rw 15 | - no_root_squash 16 | - fsid=0 17 | - no_subtree_check 18 | - require: 19 | - create_nfs_folder 20 | -------------------------------------------------------------------------------- /salt/drbd_node/parted.sls: -------------------------------------------------------------------------------- 1 | # ephemeral devices will be automatically mounted on openstack 2 | # syncing the drbd device will fail in this case 3 | {% if grains['provider'] == 'openstack' %} 4 | not_in_fstab: 5 | mount.fstab_absent: 6 | - name: {{ grains['drbd_disk_device'] }} 7 | - fs_file: /mnt 8 | 9 | not_mounted: 10 | mount.unmounted: 11 | - name: /mnt 12 | - device: {{ grains['drbd_disk_device'] }} 13 | {% endif %} 14 | 15 | mklabel_drbd: 16 | module.run: 17 | - partition.mklabel: 18 | - device: {{ grains['drbd_disk_device'] }} 19 | - label_type: gpt 20 | 21 | {% for id, data in grains['partitions'].items() %} 22 | mkpart_{{ id }}: 23 | module.run: 24 | - partition.mkpart: 25 | - device: {{ grains['drbd_disk_device'] }} 26 | - part_type: primary 27 | - fs_type: ext3 28 | - start: {{ data['start'] }} 29 | - end: {{ data['end'] }} 30 | 31 | partition_alignment_{{ id }}: 32 | module.run: 33 | - partition.align_check: 34 | - device: {{ grains['drbd_disk_device'] }} 35 | - part_type: optimal 36 | - partition: {{ id }} 37 | {% endfor %} 38 | -------------------------------------------------------------------------------- /salt/hana_node/download_hana_inst.sls: -------------------------------------------------------------------------------- 1 | {% if grains['provider'] == 'aws' %} 2 | download_files_from_s3: 3 | cmd.run: 4 | - name: "aws s3 sync {{ grains['hana_inst_master'] }} {{ grains['hana_inst_folder'] }} --region {{ grains['region'] }} --only-show-errors" 5 | - onlyif: "aws s3 sync --dryrun {{ grains['hana_inst_master'] }} {{ grains['hana_inst_folder'] }} --region {{ grains['region'] }} | grep download" 6 | - output_loglevel: quiet 7 | - hide_output: True 8 | 9 | {% elif grains['provider'] == 'gcp' %} 10 | 11 | hana_inst_directory: 12 | file.directory: 13 | - name: {{ grains['hana_inst_folder'] }} 14 | - user: root 15 | - mode: "0755" 16 | - makedirs: True 17 | 18 | {% from 'macros/download_from_google_storage.sls' import download_from_google_storage with context %} 19 | 20 | {{ download_from_google_storage( 21 | grains['gcp_credentials_file'], 22 | grains['hana_inst_master'], 23 | grains['hana_inst_folder']) }} 24 | 25 | {% endif %} 26 | 27 | {{ grains['hana_inst_folder'] }}: 28 | file.directory: 29 | - user: root 30 | - group: root 31 | - dir_mode: "0755" 32 | - file_mode: "0755" 33 | - recurse: 34 | - user 35 | - group 36 | - mode 37 | -------------------------------------------------------------------------------- /salt/hana_node/hana_inst_media.sls: -------------------------------------------------------------------------------- 1 | nfs-client: 2 | pkg.installed: 3 | - retry: 4 | attempts: 3 5 | interval: 15 6 | 7 | hana_inst_directory: 8 | 
file.directory: 9 | - name: {{ grains['hana_inst_folder'] }} 10 | - mode: "0755" 11 | - makedirs: True 12 | {% if grains.get('provider') in ['libvirt', 'openstack'] %} 13 | mount.mounted: 14 | - name: {{ grains['hana_inst_folder'] }} 15 | - device: {{ grains['hana_inst_master'] }} 16 | - fstype: nfs 17 | - mkmnt: True 18 | - persist: True 19 | - opts: tcp 20 | - required: 21 | - nfs-client 22 | {% else %} 23 | mount.mounted: 24 | - name: {{ grains['hana_inst_folder'] }} 25 | - device: {{ grains['hana_inst_master'] }} 26 | - fstype: cifs 27 | - mkmnt: True 28 | - persist: True 29 | - opts: vers=3.0,username={{ grains['storage_account_name'] }},password={{ grains['storage_account_key'] }},dir_mode=0755,file_mode=0755,sec=ntlmssp 30 | - required: 31 | - nfs-client 32 | {% endif %} 33 | -------------------------------------------------------------------------------- /salt/hana_node/hana_packages.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('offline_mode') %} 2 | {% if grains['pythonversion'][0] == 2 %} 3 | python-shaptools: 4 | {% else %} 5 | python3-shaptools: 6 | {% endif %} 7 | pkg.installed: 8 | - resolve_capabilities: true 9 | - retry: 10 | attempts: 3 11 | interval: 15 12 | {% endif %} 13 | 14 | saphanabootstrap-formula: 15 | pkg.installed: 16 | - retry: 17 | attempts: 3 18 | interval: 15 19 | -------------------------------------------------------------------------------- /salt/hana_node/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | {% if grains['provider'] in ('aws', 'gcp',) %} 3 | - hana_node.download_hana_inst 4 | {% else %} 5 | - hana_node.hana_inst_media 6 | {% endif %} 7 | - hana_node.mount 8 | - hana_node.hana_packages 9 | {% if grains['cluster_ssh_pub'] is defined and grains['cluster_ssh_key'] is defined %} 10 | - hana_node.wait 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /salt/hana_node/mount/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - hana_node.mount.packages 3 | {%- if grains['provider'] in ['aws', 'azure', 'gcp', 'libvirt', 'openstack'] %} 4 | - hana_node.mount.lvm 5 | {% else %} 6 | - hana_node.mount.mount 7 | {% endif %} 8 | {%- if grains['hana_scale_out_enabled'] %} 9 | {%- if grains['hana_scale_out_shared_storage_type'] in ['anf', 'efs', 'filestore', 'nfs'] %} 10 | - shared_storage.nfs 11 | {%- endif %} 12 | {%- endif %} 13 | -------------------------------------------------------------------------------- /salt/hana_node/mount/mount.sls: -------------------------------------------------------------------------------- 1 | {% if grains['provider'] == 'aws' %} 2 | {% set devicepartprefix = 'p' %} 3 | {% else %} 4 | {% set devicepartprefix = '' %} 5 | {% endif %} 6 | 7 | hana_partition: 8 | cmd.run: 9 | - name: | 10 | /usr/sbin/parted -s {{ grains['hana_disk_device'] }} mklabel msdos && \ 11 | /usr/sbin/parted -s {{ grains['hana_disk_device'] }} mkpart primary ext2 1M 100% && sleep 1 && \ 12 | /sbin/mkfs -t {{ grains['hana_fstype'] }} {{ grains['hana_disk_device'] }}{{ devicepartprefix }}1 13 | - unless: ls {{ grains['hana_disk_device'] }}{{ devicepartprefix }}1 14 | - require: 15 | - pkg: parted 16 | 17 | hana_directory: 18 | file.directory: 19 | - name: /hana 20 | - user: root 21 | - mode: "0755" 22 | - makedirs: True 23 | mount.mounted: 24 | - name: /hana 25 | - device: {{ grains['hana_disk_device'] }}{{ devicepartprefix }}1 26 
| - fstype: {{ grains['hana_fstype'] }} 27 | - mkmnt: True 28 | - persist: True 29 | - opts: 30 | - defaults 31 | - require: 32 | - cmd: hana_partition 33 | -------------------------------------------------------------------------------- /salt/hana_node/mount/mount_uuid.sls: -------------------------------------------------------------------------------- 1 | {% set data = salt['pillar.get']('data') %} 2 | {% set uuid = salt['disk.blkid'](data.device)[data.device]['UUID'] %} 3 | 4 | {{ data.device }}_directory_mount_azure: 5 | file.directory: 6 | - name: {{ data.path }} 7 | - user: root 8 | - mode: "0755" 9 | - makedirs: True 10 | mount.mounted: 11 | - name: {{ data.path }} 12 | - device: UUID={{ uuid }} 13 | - fstype: {{ data.fstype }} 14 | - mkmnt: True 15 | - persist: True 16 | - opts: defaults,nofail 17 | - pass_num: 2 18 | -------------------------------------------------------------------------------- /salt/hana_node/mount/packages.sls: -------------------------------------------------------------------------------- 1 | parted: 2 | pkg.installed: 3 | - retry: 4 | attempts: 3 5 | interval: 15 6 | -------------------------------------------------------------------------------- /salt/hana_node/wait.sls: -------------------------------------------------------------------------------- 1 | # Make sure all hana nodes are reachable via ssh and finished pre-deployment (cluster authorized_keys). 2 | # This is very important for scale-out setups as HANA deployment is done via ssh. 3 | # It also prevents other timing race conditions. 4 | {%- for num in range(1,grains['node_count']) %} 5 | {%- set node = grains['name_prefix'] ~ '%02d' % num %} 6 | wait_until_ssh_is_ready_{{ node }}: 7 | cmd.run: 8 | - name: until ssh -o ConnectTimeout=3 -o PreferredAuthentications=publickey {{ node }} "rpm -q saphanabootstrap-formula";do sleep 30;done 9 | - output_loglevel: quiet 10 | - timeout: 1200 11 | {%- endfor %} 12 | 13 | {%- if grains['hana_scale_out_enabled'] %} 14 | {%- set node = grains['name_prefix'] ~ 'mm' %} 15 | wait_until_ssh_is_ready_{{ node }}: 16 | cmd.run: 17 | - name: until ssh -o ConnectTimeout=3 -o PreferredAuthentications=publickey {{ node }} "rpm -q saphanabootstrap-formula";do sleep 30;done 18 | - output_loglevel: quiet 19 | - timeout: 1200 20 | {%- endif %} 21 | -------------------------------------------------------------------------------- /salt/hwcct/files/hwcct/hwcct_bench.jinja: -------------------------------------------------------------------------------- 1 | {% set data = pillar.cluster.configure.template.parameters %} 2 | {% set sid = data.sid.upper() %} 3 | {% set instance = '{:0>2}'.format(data.instance) %} 4 | 5 | #!/bin/sh 6 | 7 | # Extract and execute HWCCT 8 | 9 | typeset -r HWCCTCONFIG=/srv/salt/hwcct/files/hwcct/hwcct_config.json 10 | 11 | # Extracting HWCCT tool 12 | /usr/sap/{{ sid }}/HDB{{ instance }}/exe/SAPCAR -xf {{ hana_inst_folder }}/DATA_UNITS/SAP_HANA_HWCCT_LINUX_X86_64/HWCCT.SAR 13 | 14 | # Executing HWCCT tool 15 | cd hardwareConfigurationCheckTool 16 | source ./envprofile.sh && ./hwcct -f ${HWCCTCONFIG} 17 | -------------------------------------------------------------------------------- /salt/hwcct/files/hwcct/hwcct_config.json.jinja: -------------------------------------------------------------------------------- 1 | { 2 | "use_hdb":false, 3 | "blades":["localhost"], 4 | "tests": [{ 5 | "package": "FilesystemTest", 6 | "test_timeout": 0, 7 | "id": 1, 8 | "config": { 9 | "mount":{"{{ grains['host'] }}":["/hana"]}, 10 | "duration":"short" 11 | }, 12 | 
"class": "DataVolumeIO" 13 | }, 14 | { 15 | "package": "FilesystemTest", 16 | "test_timeout": 0, 17 | "id": 2, 18 | "config": { 19 | "mount":{"{{ grains['host'] }}":["/hana"]}, 20 | "duration":"short" 21 | }, 22 | "class": "LogVolumeIO" 23 | }], 24 | "output_dir":"/root/hwcct_out" 25 | } 26 | -------------------------------------------------------------------------------- /salt/hwcct/init.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('hwcct') == true and 'hana01' in grains['hostname'] %} 2 | hwcct-config-file: 3 | file.managed: 4 | - template: jinja 5 | - names: 6 | - /srv/salt/hwcct/files/hwcct/hwcct_config.json: 7 | - source: salt://hwcct/files/hwcct/hwcct_config.json.jinja 8 | 9 | hwcct-bench-file: 10 | file.managed: 11 | - template: jinja 12 | - names: 13 | - /srv/salt/hwcct/files/hwcct/hwcct_bench.sh: 14 | - source: salt://hwcct/files/hwcct/hwcct_bench.jinja 15 | 16 | hwcct: 17 | cmd.run: 18 | - name: sh /srv/salt/hwcct/files/hwcct/hwcct_bench.sh 19 | - require: 20 | - file: hwcct-config-file 21 | - file: hwcct-bench-file 22 | {% else %} 23 | # Do nothing if 'hwcct=false' 24 | default_nop: 25 | test.nop: [] 26 | {% endif %} 27 | -------------------------------------------------------------------------------- /salt/iscsi_srv/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - iscsi_srv.parted 3 | - iscsi.target 4 | -------------------------------------------------------------------------------- /salt/iscsi_srv/parted.sls: -------------------------------------------------------------------------------- 1 | mklabel: 2 | module.run: 3 | - partition.mklabel: 4 | - device: {{ grains['iscsidev'] }} 5 | - label_type: gpt 6 | 7 | {% for id, data in grains['partitions'].items() %} 8 | mkpart{{ id }}: 9 | module.run: 10 | - partition.mkpart: 11 | - device: {{ grains['iscsidev'] }} 12 | - part_type: primary 13 | - fs_type: ext2 14 | - start: {{ data['start'] }} 15 | - end: {{ data['end'] }} 16 | 17 | partition_alignment_{{ id }}: 18 | module.run: 19 | - partition.align_check: 20 | - device: {{ grains['iscsidev'] }} 21 | - part_type: optimal 22 | - partition: {{ id }} 23 | {% endfor %} 24 | -------------------------------------------------------------------------------- /salt/macros/download_from_google_storage.sls: -------------------------------------------------------------------------------- 1 | # Install and configure gcloud to download files from google storage accounts 2 | # https://cloud.google.com/sdk/install 3 | {% macro download_from_google_storage(credentials_file, bucket_path, dest_folder) -%} 4 | {% set gcloud_inst_dir = '/opt' %} 5 | {% set gcloud_dir = gcloud_inst_dir~'/google-cloud-sdk' %} 6 | {% set gcloud_bin_dir = '/usr/local/bin' %} 7 | 8 | # Fix for https://github.com/SUSE/ha-sap-terraform-deployments/issues/669 9 | # gcloud and gsutil don't support python3.4 usage 10 | {%- set python3_version = salt['cmd.run']('python3 --version').split(' ')[1] %} 11 | {%- if salt['pkg.version_cmp'](python3_version, '3.5') < 0 %} 12 | {%- set use_py2 = true %} 13 | {%- else %} 14 | {%- set use_py2 = false %} 15 | {%- endif %} 16 | 17 | install_gcloud: 18 | cmd.run: 19 | - name: export CLOUDSDK_PYTHON; curl https://sdk.cloud.google.com | bash -s -- '--disable-prompts' '--install-dir={{ gcloud_inst_dir }}' 20 | {%- if use_py2 %} 21 | - env: 22 | - CLOUDSDK_PYTHON: python2.7 23 | {%- endif %} 24 | - unless: ls {{ gcloud_dir }} 25 | 26 | 
/etc/profile.d/google-cloud-sdk.completion.sh: 27 | file.symlink: 28 | - target: {{ gcloud_dir }}/completion.bash.inc 29 | 30 | {{ gcloud_bin_dir }}/gcloud: 31 | file.symlink: 32 | - target: {{ gcloud_dir }}/bin/gcloud 33 | 34 | {{ gcloud_bin_dir }}/gsutil: 35 | file.symlink: 36 | - target: {{ gcloud_dir }}/bin/gsutil 37 | 38 | configure_gcloud_credentials: 39 | cmd.run: 40 | - name: {{ gcloud_bin_dir }}/gcloud auth activate-service-account --key-file {{ credentials_file }} 41 | {%- if use_py2 %} 42 | - env: 43 | - CLOUDSDK_PYTHON: python2.7 44 | {%- endif %} 45 | - require: 46 | - install_gcloud 47 | 48 | # We cannot just use path_join as converts '//' to '/' 49 | {% set bucket_path = bucket_path.replace('gs://', '') %} 50 | {% set gs_url = 'gs://'~(bucket_path | path_join('*')) %} 51 | 52 | download_files_from_gcp: 53 | cmd.run: 54 | - name: {{ gcloud_bin_dir }}/gsutil -m cp -r {{ gs_url }} {{ dest_folder }} 55 | - output_loglevel: quiet 56 | - hide_output: True 57 | {%- if use_py2 %} 58 | - env: 59 | - CLOUDSDK_PYTHON: python2.7 60 | {%- endif %} 61 | - require: 62 | - install_gcloud 63 | 64 | {%- endmacro %} 65 | -------------------------------------------------------------------------------- /salt/majority_maker_node/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | {% if grains['hana_scale_out_enabled']|default(false) %} 3 | - hana_node.hana_packages 4 | {% if grains['cluster_ssh_pub'] is defined and grains['cluster_ssh_key'] is defined %} 5 | - hana_node.wait 6 | {% endif %} 7 | {% endif %} 8 | -------------------------------------------------------------------------------- /salt/monitoring_srv/grafana.sls: -------------------------------------------------------------------------------- 1 | grafana: 2 | pkg.installed: 3 | - name: grafana 4 | - retry: 5 | attempts: 3 6 | interval: 15 7 | 8 | grafana_anonymous_login_configuration: 9 | file.line: 10 | - name: /etc/grafana/grafana.ini 11 | - mode: ensure 12 | - after: \[auth\.anonymous\] 13 | - content: enabled = true 14 | - require: 15 | - pkg: grafana 16 | 17 | grafana_provisioning_datasources: 18 | file.managed: 19 | - name: /etc/grafana/provisioning/datasources/datasources.yml 20 | - source: salt://monitoring_srv/grafana/datasources.yml.j2 21 | - template: jinja 22 | - makedirs: True 23 | - user: grafana 24 | - group: grafana 25 | - require: 26 | - pkg: grafana 27 | 28 | grafana_dashboards: 29 | pkg.installed: 30 | - pkgs: 31 | - grafana-ha-cluster-dashboards 32 | - grafana-sap-hana-dashboards 33 | - grafana-sap-netweaver-dashboards 34 | 35 | grafana_service: 36 | service.running: 37 | - name: grafana-server 38 | - enable: True 39 | - require: 40 | - pkg: grafana 41 | - pkg: grafana_dashboards 42 | - file: grafana_anonymous_login_configuration 43 | - file: grafana_provisioning_datasources 44 | -------------------------------------------------------------------------------- /salt/monitoring_srv/grafana/datasources.yml.j2: -------------------------------------------------------------------------------- 1 | # config file version 2 | apiVersion: 1 3 | 4 | datasources: 5 | - name: Prometheus 6 | type: prometheus 7 | access: proxy 8 | url: http://{{ grains['public_ip'] }}:9090 9 | basicAuth: False 10 | isDefault: True 11 | editable: true 12 | jsonData: 13 | timeInterval: 5s 14 | 15 | - name: Loki 16 | type: loki 17 | access: proxy 18 | url: http://localhost:3100 19 | -------------------------------------------------------------------------------- /salt/monitoring_srv/init.sls: 
-------------------------------------------------------------------------------- 1 | include: 2 | - .prometheus 3 | - .grafana 4 | - .loki 5 | -------------------------------------------------------------------------------- /salt/monitoring_srv/loki.sls: -------------------------------------------------------------------------------- 1 | loki: 2 | pkg.installed: 3 | - name: loki 4 | - retry: 5 | attempts: 3 6 | interval: 15 7 | 8 | loki_service: 9 | service.running: 10 | - name: loki 11 | - enable: True 12 | - require: 13 | - pkg: loki 14 | -------------------------------------------------------------------------------- /salt/monitoring_srv/prometheus.sls: -------------------------------------------------------------------------------- 1 | prometheus: 2 | pkg.installed: 3 | - name: golang-github-prometheus-prometheus 4 | - retry: 5 | attempts: 3 6 | interval: 15 7 | 8 | prometheus_alerts: 9 | file.managed: 10 | - name: /etc/prometheus/rules.yml 11 | - source: salt://monitoring_srv/prometheus/rules.yml 12 | - require: 13 | - pkg: prometheus 14 | 15 | prometheus_configuration: 16 | file.managed: 17 | - name: /etc/prometheus/prometheus.yml 18 | - source: salt://monitoring_srv/prometheus/prometheus.yml.j2 19 | - template: jinja 20 | - require: 21 | - pkg: prometheus 22 | 23 | prometheus_service: 24 | service.running: 25 | - name: prometheus 26 | - enable: True 27 | - require: 28 | - pkg: golang-github-prometheus-prometheus 29 | - file: prometheus_configuration 30 | - file: prometheus_alerts 31 | - watch: 32 | - file: prometheus_configuration 33 | - file: prometheus_alerts 34 | 35 | prometheus-alertmanager: 36 | pkg.installed: 37 | - names: 38 | - golang-github-prometheus-alertmanager 39 | - enable: True 40 | - reload: True 41 | - require: 42 | - service: prometheus_service 43 | - file: prometheus_configuration 44 | - file: prometheus_alerts 45 | - watch: 46 | - file: prometheus_configuration 47 | - file: prometheus_alerts 48 | - retry: 49 | attempts: 3 50 | interval: 15 51 | 52 | prometheus-alertmanager_service: 53 | service.running: 54 | - name: prometheus-alertmanager 55 | - enable: True 56 | - require: 57 | - pkg: golang-github-prometheus-alertmanager 58 | - service: prometheus_service 59 | - file: prometheus_configuration 60 | - file: prometheus_alerts 61 | - watch: 62 | - file: prometheus_configuration 63 | - file: prometheus_alerts 64 | -------------------------------------------------------------------------------- /salt/monitoring_srv/prometheus/prometheus.yml.j2: -------------------------------------------------------------------------------- 1 | # Sample config for Prometheus. 2 | global: 3 | scrape_interval: 5s 4 | scrape_timeout: 5s 5 | evaluation_interval: 5s 6 | 7 | alerting: 8 | alertmanagers: 9 | - static_configs: 10 | - targets: 11 | - localhost:9093 12 | 13 | rule_files: 14 | - /etc/prometheus/rules.yml 15 | 16 | scrape_configs: 17 | # we use job_name to group exporters for each cluster 18 | 19 | {% if grains.get('hana_targets', [])|length > 0 %} 20 | - job_name: hana 21 | # The HANA scrapping follows a different scrapping time to reduce the execution load into the database 22 | # This time was based on users feedback, but should be set accordingly with your environment needs. 
23 | scrape_interval: 30s 24 | scrape_timeout: 30s 25 | static_configs: 26 | - targets: 27 | {%- for ip in grains['hana_targets'] %} 28 | - "{{ ip }}:9100" # node_exporter 29 | {%- endfor %} 30 | {%- for ip in grains['hana_targets_ha'] %} 31 | - "{{ ip }}:9664" # ha_cluster_exporter 32 | {%- endfor %} 33 | {%- for ip in grains['hana_targets_vip'] %} 34 | - "{{ ip }}:9668" # hanadb_exporter 35 | {%- endfor %} 36 | {%- endif %} 37 | 38 | {%- if grains.get('drbd_targets', [])|length > 0 %} 39 | - job_name: drbd 40 | static_configs: 41 | - targets: 42 | {%- for ip in grains['drbd_targets'] %} 43 | - "{{ ip }}:9100" # node_exporter 44 | {%- endfor %} 45 | {%- for ip in grains['drbd_targets_ha'] %} 46 | - "{{ ip }}:9664" # ha_cluster_exporter 47 | {%- endfor %} 48 | {%- endif %} 49 | 50 | {%- if grains.get('netweaver_targets', [])|length > 0 %} 51 | - job_name: netweaver 52 | static_configs: 53 | - targets: 54 | {%- for ip in grains['netweaver_targets'] %} 55 | - "{{ ip }}:9100" # node_exporter 56 | {%- endfor %} 57 | {%- for ip in grains['netweaver_targets_ha'] %} 58 | - "{{ ip }}:9664" # ha_cluster_exporter 59 | {%- endfor %} 60 | {%- for ip in grains['netweaver_targets_vip'] %} 61 | - "{{ ip }}:9680" # sap_host_exporter 62 | {%- endfor %} 63 | {%- endif %} 64 | -------------------------------------------------------------------------------- /salt/netweaver_node/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - netweaver_node.mount 3 | - netweaver_node.nfs 4 | - netweaver_node.netweaver_packages 5 | - netweaver_node.installation_files 6 | -------------------------------------------------------------------------------- /salt/netweaver_node/mount/azure.sls: -------------------------------------------------------------------------------- 1 | {% if grains['additional_lun'] is not none %} 2 | {% set lun_disk = salt['cmd.run']('readlink /dev/disk/azure/scsi1/lun'~grains['additional_lun']).split('/')[-1] %} 3 | {% set real_path = '/dev/'~lun_disk %} 4 | {% set part_path = real_path~'1' %} 5 | 6 | run_fdisk: 7 | cmd.run: 8 | - name: echo -e "n\np\n1\n\n\nw" | fdisk {{ real_path }} 9 | - unless: blkid {{ part_path }} 10 | 11 | format_disk: 12 | cmd.run: 13 | - name: /sbin/mkfs -t xfs {{ part_path }} 14 | - unless: blkid {{ part_path }} | grep ' \+\UUID' 15 | - require: 16 | - run_fdisk 17 | 18 | # This state mounts the new disk using the UUID, as we need to get this value running blkid after the 19 | # previous command, we need to run it as a new state execution 20 | mount_{{ real_path }}: 21 | module.run: 22 | - state.sls: 23 | - mods: 24 | - hana_node.mount.mount_uuid 25 | - pillar: 26 | data: 27 | device: {{ part_path }} 28 | path: /usr/sap 29 | fstype: xfs 30 | - require: 31 | - format_disk 32 | {% endif %} 33 | -------------------------------------------------------------------------------- /salt/netweaver_node/mount/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - netweaver_node.mount.packages 3 | {% if grains['provider'] == 'azure' %} 4 | - netweaver_node.mount.azure 5 | {% endif %} 6 | -------------------------------------------------------------------------------- /salt/netweaver_node/mount/packages.sls: -------------------------------------------------------------------------------- 1 | parted: 2 | pkg.installed: 3 | - retry: 4 | attempts: 3 5 | interval: 15 6 | -------------------------------------------------------------------------------- 
/salt/netweaver_node/netweaver_packages.sls: -------------------------------------------------------------------------------- 1 | sapnwbootstrap-formula: 2 | pkg.installed: 3 | - retry: 4 | attempts: 3 5 | interval: 15 6 | -------------------------------------------------------------------------------- /salt/nfs_srv/directories.sls: -------------------------------------------------------------------------------- 1 | {% set basedir = grains['nfs_mounting_point'] %} 2 | 3 | # create directories for HANA scale-out deployment 4 | {% if grains['hana_scale_out_shared_storage_type'] == 'nfs' %} 5 | {% set hana_sid = grains['hana_sid'].upper() %} 6 | {% set hana_instance = '{:0>2}'.format(grains['hana_instance']) %} 7 | {% set mounts = ["data", "log", "backup", "shared"] %} 8 | {%- for site in [1,2] %} 9 | {%- for mount in mounts %} 10 | dir_{{ hana_sid }}_{{ hana_instance }}_site_{{ site }}_{{ mount }}: 11 | file.directory: 12 | - name: {{ basedir }}/{{ hana_sid }}/{{ hana_instance }}/site_{{ site }}/{{ mount }} 13 | - makedirs: True 14 | {% endfor %} 15 | {% endfor %} 16 | {% endif %} 17 | 18 | # create directories for Netweaver deployment 19 | {% if grains['netweaver_shared_storage_type'] == 'nfs' %} 20 | {% set netweaver_sid = grains['netweaver_sid'].upper() %} 21 | {% set netweaver_ascs_instance = '{:0>2}'.format(grains['netweaver_ascs_instance']) %} 22 | {% set netweaver_ers_instance = '{:0>2}'.format(grains['netweaver_ers_instance']) %} 23 | {% set netweaver_ascs_dir = 'ASCS' + netweaver_ascs_instance %} 24 | {% set netweaver_ers_dir = 'ERS' + netweaver_ers_instance %} 25 | {% set mounts = ["sapmnt", "usrsapsys", netweaver_ascs_dir, netweaver_ers_dir] %} 26 | {%- for mount in mounts %} 27 | dir_{{ netweaver_sid }}_{{ mount }}: 28 | file.directory: 29 | - name: {{ basedir }}/{{ netweaver_sid }}/{{ mount }} 30 | - makedirs: True 31 | {% endfor %} 32 | {% endif %} 33 | -------------------------------------------------------------------------------- /salt/nfs_srv/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - nfs_srv.packages 3 | - nfs_srv.lvm 4 | - nfs_srv.directories 5 | - nfs_srv.nfs 6 | -------------------------------------------------------------------------------- /salt/nfs_srv/lvm.sls: -------------------------------------------------------------------------------- 1 | # exclude DVD drive and root disk 2 | {% set pvs = grains['disks']|reject('match', 'sr0')|reject('match', 'sda')|list %} 3 | {% set fstype = 'xfs' %} 4 | {% set basedir = grains['nfs_mounting_point'] %} 5 | 6 | # Create Physical volumes 7 | {% for pv in pvs %} 8 | nfs_lvm_pvcreate_{{ pv }}: 9 | lvm.pv_present: 10 | - name: /dev/{{ pv }} 11 | {% endfor %} 12 | 13 | # Create Volume group 14 | nfs_lvm_vgcreate_data: 15 | lvm.vg_present: 16 | - name: vg_nfs 17 | - devices: 18 | {% for pv in pvs %} 19 | - /dev/{{ pv }} 20 | {% endfor %} 21 | 22 | # Activate Volume group (may be needed after re-provisioning) 23 | nfs_lvm_vgactivate_data: 24 | cmd.run: 25 | - name: vgchange -ay vg_nfs 26 | - require: 27 | - lvm: nfs_lvm_vgcreate_data 28 | 29 | # Create Logical volume 30 | nfs_lvm_lvcreate_sapdata: 31 | lvm.lv_present: 32 | - name: lv_sapdata 33 | - vgname: vg_nfs 34 | - extents: 100%VG 35 | 36 | {% for vg in ['nfs'] %} 37 | 38 | {% if vg == 'nfs' %} 39 | {% set lvs = ['sapdata'] %} 40 | {% endif %} 41 | 42 | {% for lv in lvs %} 43 | 44 | # Format lvs 45 | nfs_format_lv_vg_{{ vg }}_lv_{{ lv }}: 46 | cmd.run: 47 | - name: | 48 | /sbin/mkfs -t {{ fstype }} /dev/vg_{{ vg 
}}/lv_{{ lv }} 49 | - unless: blkid /dev/mapper/vg_{{ vg }}-lv_{{ lv }} 50 | 51 | {% endfor %} 52 | {% endfor %} 53 | 54 | # Mount sapdata 55 | nfs_sapdata_directory_mount: 56 | file.directory: 57 | - name: {{ basedir }} 58 | - user: root 59 | - mode: "0755" 60 | - makedirs: True 61 | mount.mounted: 62 | - name: {{ basedir }} 63 | - device: /dev/vg_nfs/lv_sapdata 64 | - fstype: {{ fstype }} 65 | - mkmnt: True 66 | - persist: True 67 | - opts: defaults,nofail 68 | - pass_num: 2 69 | - require: 70 | - cmd: nfs_format_lv_vg_nfs_lv_sapdata 71 | 72 | -------------------------------------------------------------------------------- /salt/nfs_srv/nfs.sls: -------------------------------------------------------------------------------- 1 | create_nfs_folder: 2 | file.directory: 3 | - name: {{ grains['nfs_mounting_point'] }} 4 | - user: root 5 | - mode: "0755" 6 | - makedirs: True 7 | 8 | 9 | configure_nfs: 10 | nfs_export.present: 11 | - name: {{ grains['nfs_mounting_point'] }} 12 | - hosts: '*' 13 | - options: 14 | - rw 15 | - no_root_squash 16 | - fsid=0 17 | - no_subtree_check 18 | - require: 19 | - create_nfs_folder 20 | 21 | nfsserver: 22 | service: 23 | - running 24 | - enable: True 25 | - reload: True 26 | - watch: 27 | - configure_nfs 28 | -------------------------------------------------------------------------------- /salt/nfs_srv/packages.sls: -------------------------------------------------------------------------------- 1 | xfsprogs_package: 2 | pkg.installed: 3 | - name: xfsprogs 4 | - retry: 5 | attempts: 3 6 | interval: 15 7 | 8 | lvm2_package: 9 | pkg.installed: 10 | - name: lvm2 11 | - retry: 12 | attempts: 3 13 | interval: 15 14 | 15 | nfs_packages: 16 | pkg.installed: 17 | - name: nfs-kernel-server 18 | - retry: 19 | attempts: 3 20 | interval: 15 21 | -------------------------------------------------------------------------------- /salt/os_setup/auth_keys.sls: -------------------------------------------------------------------------------- 1 | {% if grains['authorized_keys'] %} 2 | authorized_keys: 3 | ssh_auth.present: 4 | - user: {{ grains['authorized_user'] }} 5 | - enc: ssh-rsa 6 | - names: 7 | {%- for key in grains['authorized_keys'] %} 8 | - {{ key }} 9 | {%- endfor %} 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /salt/os_setup/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | {% if grains['provider'] == 'libvirt' %} 3 | - os_setup.ip_workaround 4 | {% endif %} 5 | - os_setup.auth_keys 6 | - os_setup.registration 7 | - os_setup.packages_repos 8 | - os_setup.minion_configuration 9 | - os_setup.requirements 10 | - os_setup.packages_install 11 | - os_setup.packages_update 12 | -------------------------------------------------------------------------------- /salt/os_setup/ip_workaround.sls: -------------------------------------------------------------------------------- 1 | enable_eth1: 2 | cmd.run: 3 | #- name: /sbin/ifconfig eth1 {{ grains['host_ip'] }} 4 | - name: /sbin/ip a add {{ grains['host_ip'] }}/24 dev eth1 & /sbin/ip link set eth1 up 5 | 6 | /etc/sysconfig/network/ifcfg-eth1: 7 | file.managed: 8 | - contents: | 9 | STARTMODE=onboot 10 | BOOTPROTO=static 11 | IPADDR={{ grains['host_ip'] }}/24 12 | -------------------------------------------------------------------------------- /salt/os_setup/minion_configuration.sls: -------------------------------------------------------------------------------- 1 | # The base environment should already come with the 
salt-standalone-formulas-configuration >= 3002.2-46.1 rpm. 2 | # We set it here anyway to be more resilient. 3 | /etc/salt/minion.d/environment_base.conf: 4 | file.managed: 5 | - contents: | 6 | file_roots: 7 | base: 8 | - /srv/salt 9 | - /usr/share/salt-formulas/states 10 | 11 | /etc/salt/minion.d/environment_predeployment.conf: 12 | file.managed: 13 | - contents: | 14 | file_roots: 15 | predeployment: 16 | - /srv/salt 17 | - /usr/share/salt-formulas/states 18 | 19 | /etc/salt/minion.d/environment_postdeployment.conf: 20 | file.managed: 21 | - contents: | 22 | file_roots: 23 | postdeployment: 24 | - /srv/salt 25 | - /usr/share/salt-formulas/states 26 | 27 | # prevent "[WARNING ] top_file_merging_strategy is set to 'merge' and multiple top files were found." 28 | /etc/salt/minion.d/top_file_merging_strategy.conf: 29 | file.managed: 30 | - contents: | 31 | top_file_merging_strategy: same 32 | 33 | # Old module.run style will be deprecated after sodium release 34 | /etc/salt/minion.d/use_superseded.conf: 35 | file.managed: 36 | - contents: | 37 | use_superseded: 38 | - module.run 39 | 40 | minion_service: 41 | service.dead: 42 | - name: salt-minion 43 | - enable: False 44 | -------------------------------------------------------------------------------- /salt/os_setup/packages_install.sls: -------------------------------------------------------------------------------- 1 | # check if iscsi-formula should be installed 2 | {%- if grains.get('role') == "iscsi_srv" %} 3 | iscsi-formula: 4 | pkg.installed: 5 | - retry: 6 | attempts: 3 7 | interval: 15 8 | {%- endif %} 9 | 10 | # iscsi kernel modules are not available in kernel-default-base 11 | kernel-default-base: 12 | pkg.removed: 13 | - retry: 14 | attempts: 3 15 | interval: 15 16 | 17 | kernel-default: 18 | pkg.installed: 19 | - retry: 20 | attempts: 3 21 | interval: 15 22 | # install kernel-default if kernel-default-base is installed, do not touch otherwise 23 | - require: 24 | - pkg: kernel-default-base 25 | -------------------------------------------------------------------------------- /salt/os_setup/packages_repos.sls: -------------------------------------------------------------------------------- 1 | {% if grains['os_family'] == 'Suse' %} 2 | 3 | {% if grains['ha_sap_deployment_repo'] %} 4 | {% if 'SLE_' in grains['ha_sap_deployment_repo'] %} 5 | {% set repository = grains['ha_sap_deployment_repo'] %} 6 | {% else %} 7 | {% set sle_version = 'SLE_'~grains['osrelease_info'][0] %} 8 | {% set sle_version = sle_version~'_SP'~grains['osrelease_info'][1] if grains['osrelease_info']|length > 1 else sle_version %} 9 | {% set repository = grains['ha_sap_deployment_repo']~"/"~sle_version %} 10 | {% endif %} 11 | allow_all_vendor_changes: 12 | file.append: 13 | - name: /etc/zypp/zypp.conf 14 | - text: solver.allowVendorChange = true 15 | 16 | ha_sap_deployments_repo: 17 | pkgrepo.managed: 18 | - name: ha_sap_deployments 19 | - baseurl: {{ repository }} 20 | 21 | set_priority_ha_sap_deployments_repo: 22 | cmd.run: 23 | - name: zypper mr -p 90 ha_sap_deployments 24 | - require: 25 | - ha_sap_deployments_repo 26 | {% endif %} 27 | 28 | refresh_repos_after_registration: 29 | cmd.run: 30 | - name: zypper --non-interactive --gpg-auto-import-keys refresh 31 | - retry: 32 | attempts: 3 33 | interval: 15 34 | - onlyif: 'zypper lr' 35 | 36 | {% endif %} 37 | -------------------------------------------------------------------------------- /salt/os_setup/packages_update.sls: -------------------------------------------------------------------------------- 1 | 
--------------------------------------------------------------------------------
/salt/os_setup/packages_update.sls:
--------------------------------------------------------------------------------
{% if grains['os_family'] == 'Suse' %}

{% if not grains.get('offline_mode') %}
update_system_packages:
  cmd.run:
    - name: zypper --non-interactive --gpg-auto-import-keys update --no-recommends --auto-agree-with-licenses
    - retry:
        attempts: 3
        interval: 15
{% endif %}

{% endif %}

--------------------------------------------------------------------------------
/salt/os_setup/requirements.sls:
--------------------------------------------------------------------------------
{% if grains['pkg_requirements'] and not grains['ha_sap_deployment_repo'] %}
{% for role, packages in grains['pkg_requirements'].items() if role == grains['role'] %}
install_package_requirements_{{ role }}:
  pkg.installed:
    - pkgs:
      {% for pkg, version in packages.items() %}
      - {{ pkg }}{% if version %}: {{ version }}{% endif %}
      {% endfor %}
    - resolve_capabilities: true
    - retry:
        attempts: 3
        interval: 15

print_warning_message_{{ role }}:
  test.show_notification:
    - text: |
        Some of the packages above are not available in the requested version.
        If the error persists, try setting the 'ha_sap_deployment_repo' value in your terraform.tfvars to
        https://download.opensuse.org/repositories/network:ha-clustering:sap-deployments:${specificversion}
    - onfail:
        - install_package_requirements_{{ role }}
{% endfor %}
{% endif %}

--------------------------------------------------------------------------------
/salt/postdeployment/init.sls:
--------------------------------------------------------------------------------
{% if grains.get('cleanup_secrets') == true %}
include:
  - .remove_grains
  - .remove_salt_logs
{% endif %}

# dummy state to always include at least one state
postdeployment ran:
  cmd.run:
    - name: echo "postdeployment ran"

--------------------------------------------------------------------------------
/salt/postdeployment/remove_grains.sls:
--------------------------------------------------------------------------------
# remove grains file as it might include sensitive information
/etc/salt/grains:
  file.absent

--------------------------------------------------------------------------------
/salt/postdeployment/remove_salt_logs.sls:
--------------------------------------------------------------------------------
# remove logfiles that could contain sensitive information
/var/log/salt-os-setup.log:
  file.absent
/var/log/salt-predeployment.log:
  file.absent
/var/log/salt-deployment.log:
  file.absent
/var/log/salt-postdeployment.log:
  file.absent

--------------------------------------------------------------------------------
/salt/provider/azure/nfsv4.sls:
--------------------------------------------------------------------------------
# settings according to
# https://docs.microsoft.com/en-us/azure/azure-netapp-files/azure-netapp-files-configure-nfsv41-domain
/etc/idmapd.conf:
  file.line:
    - match: "^Domain = localdomain"
    - mode: replace
    - content: |
        Domain = defaultv4iddomain.com
    - require:
        - pkg: nfs-client

nfs-idmapd:
  service.running:
    - enable: True
    - require:
        - pkg: nfs-client
        - file: /etc/idmapd.conf
    - watch:
        - pkg: nfs-client
        - file: /etc/idmapd.conf

clear_idmap_cache:
  cmd.run:
    - name: nfsidmap -c
    - onchanges:
        - file: /etc/idmapd.conf
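Note: requirements.sls above only takes effect when ha_sap_deployment_repo is left
empty, and it expects the pkg_requirements grain to map a role name to a dictionary of
package names with optional versions; only the entry matching the minion's own role
grain is installed. A minimal sketch of such a grain follows (the package names and the
pinned version are placeholders, not values shipped by the repository):

# illustrative /etc/salt/grains excerpt (placeholder values)
role: hana_node
pkg_requirements:
  hana_node:
    habootstrap-formula: ''     # empty version -> install whatever version is available
    salt-shaptools: '0.3.10'    # hypothetical pinned version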
--------------------------------------------------------------------------------
/salt/top.sls:
--------------------------------------------------------------------------------
base:
  'role:hana_node':
    - match: grain
    - hana

  'G@role:hana_node and G@ha_enabled:true':
    - match: compound
    - cluster

  'role:majority_maker_node':
    - match: grain
    - hana.packages
    - hana.ha_cluster
    - cluster

  'role:drbd_node':
    - match: grain
    - drbd
    - cluster

  'role:netweaver_node':
    - match: grain
    - netweaver

  'G@role:netweaver_node and G@ha_enabled:true and P@hostname:.*(01|02)':
    - match: compound
    - cluster

predeployment:
  '*':
    - default

  'role:hana_node':
    - match: grain
    - cluster_node
    - hana_node

  'role:majority_maker_node':
    - match: grain
    - cluster_node
    - majority_maker_node

  'role:netweaver_node':
    - match: grain
    - cluster_node
    - netweaver_node

  'role:drbd_node':
    - match: grain
    - cluster_node
    - drbd_node

  'role:iscsi_srv':
    - match: grain
    - iscsi_srv

  'role:monitoring_srv':
    - match: grain
    - monitoring_srv

  'role:bastion':
    - match: grain
    - bastion

  # minimal NFS server on libvirt/openstack - should not be used for production
  'role:nfs_srv':
    - match: grain
    - nfs_srv

postdeployment:
  '*':
    - postdeployment

--------------------------------------------------------------------------------
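Note: the top file above targets minions through the role and ha_enabled grains written
during provisioning. As a rough illustration (the grain values are placeholders), a HANA
cluster node would carry grains similar to the following and would therefore match both
the 'role:hana_node' grain target and the compound target, receiving the hana and
cluster states in the base environment:

# illustrative grains for a HANA cluster node (placeholder values)
role: hana_node
ha_enabled: true

The resulting state assignment can be inspected on a node with a command along the
lines of 'salt-call --local state.show_top'.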