├── .coafile ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml └── workflows │ ├── ci-validation.yml │ ├── ci-validation │ ├── salt-client-validation │ ├── salt-controller-validation │ ├── salt-minion-validation │ ├── salt-mirror-validation │ ├── salt-server-containerized-validation │ ├── salt-server-validation │ └── terraform-validation │ └── mirror-update-warning.yml ├── .gitignore ├── CONTRIBUTING.md ├── DESIGN.md ├── LICENSE ├── README.md ├── README_ADVANCED.md ├── README_TESTING.md ├── TROUBLESHOOTING.md ├── backend_modules ├── aws │ ├── README.md │ ├── README_ADVANCED.md │ ├── base │ │ ├── ami.tf │ │ ├── iam_instance_profile.tf │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── db_host │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── host │ │ ├── combustion │ │ ├── main.tf │ │ ├── user_data.yaml │ │ ├── variables.tf │ │ └── versions.tf │ └── network │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf ├── azure │ ├── README.md │ ├── README_ADVANCED.md │ ├── base │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf │ ├── host │ │ ├── main.tf │ │ ├── user_data.yaml │ │ ├── variables.tf │ │ └── versions.tf │ └── network │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf ├── feilong │ ├── README.md │ ├── base │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf │ └── host │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf ├── libvirt │ ├── README.md │ ├── base │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf │ └── host │ │ ├── combustion │ │ ├── config.ign │ │ ├── cpu_features.xsl │ │ ├── main.tf │ │ ├── network_config.yaml │ │ ├── pxe_boot.xsl │ │ ├── user_data.yaml │ │ ├── variables.tf │ │ ├── versions.tf │ │ └── virthost.xsl ├── null │ ├── README.md │ ├── base │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf │ └── host │ │ ├── main.tf │ │ ├── variables.tf │ │ └── versions.tf └── ssh │ ├── README.md │ ├── base │ ├── main.tf │ ├── variables.tf │ └── versions.tf │ └── host │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── configure_aws_tunnels.rb ├── help ├── data-pool-configuration.png ├── sumaform-icon-black.svg ├── sumaform-icon-color.svg ├── sumaform-logo-black.svg └── sumaform-logo-color.svg ├── main.tf.aws-create-mirror-snapshot.example ├── main.tf.aws-testsuite.example ├── main.tf.aws.example ├── main.tf.azure.example ├── main.tf.libvirt-testsuite.example ├── main.tf.libvirt-testsuite.example.Manager-43 ├── main.tf.libvirt.example ├── main.tf.libvirt.example.Manager-43 ├── main.tf.ssh.example ├── modules ├── base │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── build_host │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── client │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── controller │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── cucumber_testsuite │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── dhcp_dns │ ├── README.md │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── grafana │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── host │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── jenkins │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── locust │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── minion │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── mirror │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── proxy │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── proxy_containerized │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── pxe_boot │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── rds │ ├── main.tf 
│ ├── variables.tf │ └── versions.tf ├── registry │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── salt_testenv │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── server │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── server_containerized │ ├── main.tf │ ├── variables.tf │ └── versions.tf ├── sshminion │ ├── main.tf │ ├── variables.tf │ └── versions.tf └── virthost │ ├── main.tf │ ├── variables.tf │ └── versions.tf └── salt ├── build_host ├── certs │ └── ca.cert.pem ├── init.sls └── keys │ ├── id_ed25519 │ └── id_ed25519.pub ├── client ├── init.sls └── testsuite.sls ├── controller ├── bashrc ├── http_testsuite.service ├── id_ed25519 ├── id_ed25519.pub ├── init.sls ├── run-testsuite └── virtualhostmanager.create.json ├── default ├── avahi.sls ├── chrony.conf ├── firewall.sls ├── gpg_keys │ ├── galaxy.key │ ├── saltstack.key │ ├── suse_ca.key │ ├── suse_el9.key │ ├── suse_res6.key │ ├── suse_res7.key │ ├── suse_staging.key │ └── uyuni.key ├── hostname.sls ├── ids.sls ├── init.sls ├── locale.sls ├── minimal.sls ├── network.sls ├── ntp.conf ├── pkgs.sls ├── set_ip_in_etc_hosts.py ├── sshd.sls ├── testsuite.sls ├── time.sls └── update.sls ├── first_deployment_highstate.sh ├── grafana ├── init.sls ├── provisioning │ ├── dashboards │ │ ├── suse_manager.json │ │ └── suse_manager.yml │ └── datasources │ │ └── prometheus_localhost.yml └── setup_grafana.py ├── highstate.sh ├── jenkins ├── basic-security.groovy ├── configuration.sls ├── configure-jenkins.sh ├── etc │ └── jenkins.conf ├── http_proxy.sls ├── init.sls └── podman.sls ├── locust ├── init.sls ├── locust-exporter.py ├── locustfile.py └── run-locust.py ├── minion ├── certs │ └── SUSE_Trust_Root.crt.pem ├── init.sls ├── reflector.sls └── testsuite.sls ├── mirror ├── configuration.sls ├── cron.sls ├── cron_scripts │ ├── apt-mirror.sh │ ├── docker-images.sh │ ├── minima.sh │ ├── mirror-images.sh │ └── scc-data.sh ├── etc │ ├── apt-mirror.list │ ├── docker-images.conf │ ├── minima.yaml │ ├── minimum_repositories_testsuite.yaml │ ├── mirror-images.conf │ ├── mirror.conf │ └── scc-data.conf ├── init.sls └── utils │ ├── adjust_external_repos │ ├── docker_images │ └── refresh_scc_data.py ├── post_provisioning_cleanup.sh ├── proxy ├── additional_disk.sls ├── config-answers.txt └── init.sls ├── proxy_containerized ├── config ├── id_ed25519 ├── id_ed25519.pub └── init.sls ├── registry └── init.sls ├── repos ├── additional.sls ├── build_host.sls ├── client_tools.sls ├── default_settings.sls ├── disable_local.sls ├── init.sls ├── jenkins.sls ├── minion.sls ├── os.sls ├── proxy.sls ├── proxy43.sls ├── proxyUyuni.sls ├── proxy_containerized.sls ├── proxy_containerized50.sls ├── proxy_containerized51.sls ├── proxy_containerizedHead.sls ├── proxy_containerizedUyuni.sls ├── ruby.sls ├── server.sls ├── server43.sls ├── serverUyuni.sls ├── server_containerized.sls ├── server_containerized50.sls ├── server_containerized51.sls ├── server_containerizedHead.sls ├── server_containerizedUyuni.sls ├── testsuite.sls ├── tools.sls ├── vendor.sls └── virthost.sls ├── salt_testenv ├── init.sls ├── postinstallation.sls ├── salt_bundle_package.sls └── salt_classic_package.sls ├── scc ├── build_host.sls ├── clean.sls ├── client.sls ├── init.sls ├── minion.sls ├── proxy.sls └── server.sls ├── server ├── additional_disk.sls ├── aws.crt ├── download_ubuntu_repo.sh ├── firewall.sls ├── firewalld_public.xml ├── init.sls ├── initial_content.sls ├── iss.sls ├── java_agent.yaml ├── large_deployment.sls ├── large_deployment_tune_tomcat.xslt ├── master-custom.conf ├── 
postgres-exporter ├── postgres.sls ├── postgres_exporter_queries.yaml ├── prometheus.sls ├── register_master.py ├── register_slave.py ├── rhn.sls ├── salt_master.sls ├── setup_env.sh ├── spacewalk-search.sls ├── taskomatic.sls ├── taskomatic_jmx.conf ├── tcpdump.service ├── tcpdump.sls ├── testsuite.sls ├── tomcat.sls ├── tomcat_jmx.conf ├── wait_for_mgr_sync.py └── wait_for_reposync.py ├── server_containerized ├── additional_disks.sls ├── init.sls ├── initial_content.sls ├── install_common.sls ├── install_k3s.sls ├── install_mgradm.sls ├── install_podman.sls ├── large_deployment.sls ├── large_deployment_tune_tomcat.xslt ├── mgradm.yaml ├── mirror-pv.yaml ├── rhn.sls ├── salt-events.service ├── testsuite.sls ├── wait_for_kube_resource.py └── wait_for_mgr_sync.sh ├── top.sls ├── virthost ├── init.sls └── systemd-detect-virt └── wait_for_salt.sh /.coafile: -------------------------------------------------------------------------------- 1 | [whitespace] 2 | bears = SpaceConsistencyBear 3 | files = **/*.(tf|md|py|sls|yaml|rb|txt|json) 4 | use_spaces = True 5 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # CODEOWNERS info & syntax 2 | # Lines starting with "#" are comments. 3 | # 4 | # Each line is a file pattern followed by one or more owners. 5 | # 6 | # Order is important; the last matching pattern takes the most 7 | # precedence. 8 | # 9 | # Owners can be specified by email address or GitHub username 10 | # 11 | # Teams can be specified as code owners as well. Teams should 12 | # be identified in the format @org/team-name. Teams must have 13 | # explicit write access to the repository. 14 | # 15 | # Patterns 16 | # 17 | # Whole repository 18 | # * @global-owner 19 | # 20 | # Directory (without subdirectories) 21 | # docs/* @tech_writer 22 | # 23 | # Directory (including subdirectories) 24 | # apps/ @app_developer 25 | # 26 | # Adding a leading "/" to a pattern means the directory must 27 | # be in the root of the repository. 28 | # 29 | # Empty Pattern -> no owner (@app_developer owns all of apps/ except apps/github) 30 | # apps/ @app_developer 31 | # apps/github 32 | 33 | # Sumaform Code Owners 34 | 35 | # Owners of the codebase 36 | * @uyuni-project/sumaform-developers 37 | 38 | # Workflows reviewers 39 | .github/workflows @uyuni-project/workflows-reviewers 40 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## What does this PR change? 2 | 3 | **add description** 4 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | interval: "daily" 12 | -------------------------------------------------------------------------------- /.github/workflows/ci-validation.yml: -------------------------------------------------------------------------------- 1 | name: CI validation tests 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | validate_salt: 10 | name: Validate Salt states 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2 14 | with: 15 | fetch-depth: 1 16 | # - uses: awalsh128/cache-apt-pkgs-action@5902b33ae29014e6ca012c5d8025d4346556bd40 #v1.4.3 17 | # with: 18 | # packages: salt-common 19 | # version: 1.0 20 | # execute_install_scripts: true 21 | - name: Install Salt using bootstrap 22 | run: | 23 | curl -fsSL https://github.com/saltstack/salt-bootstrap/releases/latest/download/bootstrap-salt.sh -o install_salt.sh 24 | sudo sh install_salt.sh -P -x python3 25 | - name: Validate server states 26 | if: always() 27 | run: bash .github/workflows/ci-validation/salt-server-validation 28 | - name: Validate client states 29 | if: always() 30 | run: bash .github/workflows/ci-validation/salt-client-validation 31 | - name: Validate minion states 32 | if: always() 33 | run: bash .github/workflows/ci-validation/salt-minion-validation 34 | - name: Validate controller states 35 | if: always() 36 | run: bash .github/workflows/ci-validation/salt-controller-validation 37 | - name: Validate mirror states 38 | if: always() 39 | run: bash .github/workflows/ci-validation/salt-mirror-validation 40 | - name: Validate containerized server states 41 | if: always() 42 | run: bash .github/workflows/ci-validation/salt-server-containerized-validation 43 | 44 | validate_terraform_config: 45 | name: Validate terraform configuration 46 | runs-on: ubuntu-latest 47 | steps: 48 | - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 #v4.2.2 49 | with: 50 | fetch-depth: 1 51 | - uses: hashicorp/setup-terraform@b9cd54a3c349d3f38e8881555d616ced269862dd #v3.1.2 52 | with: 53 | terraform_version: 1.0.10 54 | - name: Validate configuration using example files 55 | run: .github/workflows/ci-validation/terraform-validation 56 | -------------------------------------------------------------------------------- /.github/workflows/ci-validation/salt-client-validation: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | set -x 3 | 4 | mkdir -p testconfig 5 | 6 | cat > testconfig/grains <<EOF ... EOF 7 | cat > testconfig/minion <<EOF ... EOF [the heredoc bodies were lost in extraction, as were the salt-controller-validation, salt-minion-validation, salt-mirror-validation, salt-server-containerized-validation, salt-server-validation and terraform-validation scripts; each salt-*-validation script follows the same pattern of writing testconfig/grains and testconfig/minion before validating the corresponding Salt states] -------------------------------------------------------------------------------- /.github/workflows/mirror-update-warning.yml (header lost in extraction): -------------------------------------------------------------------------------- echo "IMAGE_LIST<<EOF" >> $GITHUB_ENV 23 | 24 | git diff -p -U0 --no-color --diff-filter=M -G"\{var\.mirror\}" \ 25 | ${{ github.event.pull_request.base.sha }}..${{ github.event.pull_request.head.sha }} \ 26 | -- backend_modules/libvirt/base/main.tf | \ 27 | grep "^[+-] " | sed 's/^[+-]\s*\([^ ]*\).*$/ - `\1`/' | uniq >> $GITHUB_ENV 28 | 29 | echo "EOF" >> $GITHUB_ENV 30 | - name: Comment on the pull request 31 | uses: actions-cool/maintain-one-comment@4b2dbf086015f892dcb5e8c1106f5fccd6c1476b # v3.2.0 32 | with: 33 | delete: ${{ !env.IMAGE_LIST }} 34 | body: | 35 | This pull request updates the URLs of the following images: 36 | ${{ env.IMAGE_LIST }} 37 | 38 | Please consider updating the CI and BV mirror configurations accordingly. 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | .terraform.lock.hcl 3 | /local_workspaces 4 | 5 | /terraform.tfstate* 6 | /.terraform/ 7 | /.terraform.d/ 8 | /main.tf 9 | .terraform.tfstate.lock.info 10 | 11 | /salt/virthost/*.qcow2 12 | 13 | /modules/backend 14 | 15 | **/.*.sw* 16 | .*.sw* 17 | 18 | .idea 19 | sumaform.iml 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 SUSE LLC 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are 6 | met: 7 | 8 | * Redistributions of source code must retain the above copyright 9 | notice, this list of conditions and the following disclaimer. 10 | * Redistributions in binary form must reproduce the above 11 | copyright notice, this list of conditions and the following disclaimer 12 | in the documentation and/or other materials provided with the 13 | distribution. 14 | * Neither the name of SUSE LLC nor the names of its 15 | contributors may be used to endorse or promote products derived from 16 | this software without specific prior written permission. 17 | 18 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 19 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 20 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 21 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 22 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 23 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 24 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 28 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 | -------------------------------------------------------------------------------- /backend_modules/aws/base/iam_instance_profile.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_instance_profile" "metering_full_access_instance_profile" { 2 | count = var.is_server_paygo_instance ? 1 : 0 3 | name = "${var.name_prefix}-paygo-metering" 4 | 5 | role = aws_iam_role.metering_full_access_role[count.index].name 6 | } 7 | 8 | resource "aws_iam_role" "metering_full_access_role" { 9 | count = var.is_server_paygo_instance ? 1 : 0 10 | name = "${var.name_prefix}metering-role" 11 | 12 | assume_role_policy = jsonencode({ 13 | Version = "2012-10-17", 14 | Statement = [ 15 | { 16 | Action = "sts:AssumeRole", 17 | Effect = "Allow", 18 | Principal = { 19 | Service = "ec2.amazonaws.com", 20 | }, 21 | }, 22 | ], 23 | }) 24 | } 25 | 26 | 27 | resource "aws_iam_role_policy_attachment" "metering_full_access_policy_attachment" { 28 | count = var.is_server_paygo_instance ? 1 : 0 29 | policy_arn = "arn:aws:iam::aws:policy/AWSMarketplaceMeteringFullAccess" 30 | role = aws_iam_role.metering_full_access_role[count.index].name 31 | } 32 | -------------------------------------------------------------------------------- /backend_modules/aws/base/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/base/variables.tf -------------------------------------------------------------------------------- /backend_modules/aws/base/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 3.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/aws/db_host/main.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | 4 | provider_settings = merge({ 5 | public_instance = false 6 | volume_size = 50 7 | instance_class = "db.t3.micro" }, 8 | var.provider_settings 9 | ) 10 | 11 | db_private_subnet_name = var.base_configuration.db_private_subnet_name 12 | db_security_group_id = var.base_configuration.private_db_security_group_id 13 | 14 | resource_name_prefix = "${var.base_configuration["name_prefix"]}${var.name}" 15 | availability_zone = var.base_configuration["availability_zone"] 16 | } 17 | 18 | resource "aws_db_instance" "instance" { 19 | instance_class = local.provider_settings["instance_class"] 20 | count = var.quantity 21 | identifier = local.resource_name_prefix 22 | db_subnet_group_name = local.db_private_subnet_name 23 | vpc_security_group_ids = [local.db_security_group_id] 24 | engine = var.engine 25 | engine_version = var.engine_version 26 | username = var.db_username 27 | password = var.db_password 28 | availability_zone = local.availability_zone 29 | publicly_accessible = var.publicly_accessible 30 | skip_final_snapshot = var.skip_final_snapshot 31 | 32 | allocated_storage = local.provider_settings["volume_size"] 33 | 34 | tags = { 35 | Name = "${local.resource_name_prefix}${var.quantity > 1 ? "-${count.index + 1}" : ""}" 36 | } 37 | 38 | lifecycle { 39 | ignore_changes = [tags] 40 | } 41 | } 42 | 43 | output "configuration" { 44 | depends_on = [aws_db_instance.instance] 45 | value = { 46 | ids = length(aws_db_instance.instance) > 0 ? aws_db_instance.instance[*].id : [] 47 | hostnames = length(aws_db_instance.instance) > 0 ? 
aws_db_instance.instance.*.address : [] 48 | username = length(aws_db_instance.instance) > 0 ? aws_db_instance.instance.*.username : [] 49 | port = length(aws_db_instance.instance) > 0 ? aws_db_instance.instance.*.port : [] 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /backend_modules/aws/db_host/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "name" { 6 | description = "hostname, without the domain part" 7 | type = string 8 | } 9 | 10 | variable "quantity" { 11 | description = "number of hosts like this one" 12 | default = 1 13 | } 14 | 15 | variable "provider_settings" { 16 | description = "Map of provider-specific settings, see the backend-specific README file" 17 | default = {} 18 | } 19 | 20 | variable "connect_to_base_network" { 21 | description = "true if you want a card connected to the main network, see README_ADVANCED.md" 22 | default = true 23 | } 24 | 25 | variable "connect_to_additional_network" { 26 | description = "true if you want a card connected to the additional network (if any), see README_ADVANCED.md" 27 | default = false 28 | } 29 | 30 | variable "publicly_accessible" { 31 | description = "true if you want the RDS to have a public address" 32 | default = false 33 | } 34 | 35 | variable "skip_final_snapshot" { 36 | description = "Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created." 37 | default = true 38 | } 39 | 40 | variable "engine" { 41 | description = "RDS engine, by default postgres" 42 | } 43 | 44 | variable "engine_version" { 45 | description = "RDS engine version" 46 | } 47 | 48 | variable "db_username" { 49 | description = "RDS root user name" 50 | default = "username" 51 | } 52 | 53 | variable "db_password" { 54 | description = "RDS root user password" 55 | sensitive = true 56 | } 57 | -------------------------------------------------------------------------------- /backend_modules/aws/db_host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 3.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/aws/host/combustion: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # combustion: network prepare 3 | set -euxo pipefail 4 | 5 | function nm_config() { 6 | connectiondir="/etc/NetworkManager/system-connections" 7 | connectionfile="$connectiondir/Wired connection $1.nmconnection" 8 | mkdir -p "$connectiondir" 9 | cat > "$connectionfile" <<EOF ... EOF [the nmconnection heredoc body and the intermediate combustion steps were lost in extraction] exec > >(exec tee -a /var/log/combustion) 2>&1 34 | 35 | # Name the Network Manager connections (final phase on real filesystem) 36 | nm_config 1 eth0 auto 37 | nm_config 2 eth1 manual 38 | 39 | # Set linux as password for root 40 | echo 'root:$6$3aQC9rrDLHiTf1yR$NoKe9tko0kFIpu0rQ2y/OzOOtbVvs0Amr2bx0T4cGf6aq8PG74EmVy8lSDJdbLVVFpOSzwELWyReRCiPHa7DG0' | chpasswd -e 41 | 42 | echo "PermitRootLogin yes" > /etc/ssh/sshd_config 43 | echo "ChallengeResponseAuthentication yes" >> /etc/ssh/sshd_config 44 | echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config 45 | echo "ssh-ed25519
AAAAC3NzaC1lZDI1NTE5AAAAIGEPA8UvZ/6tTiL+MKGUAsHJuPlDpueeDmbuJ+0giOCY root@controller" > /root/.ssh/authorized_keys 46 | 47 | # Add a public ssh key and enable sshd 48 | systemctl enable sshd.service 49 | systemctl restart sshd.service 50 | -------------------------------------------------------------------------------- /backend_modules/aws/host/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/host/variables.tf -------------------------------------------------------------------------------- /backend_modules/aws/host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 2.1.0" 7 | } 8 | aws = { 9 | source = "hashicorp/aws" 10 | version = "~> 3.0" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /backend_modules/aws/network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | description = "Region where the instance is created" 3 | type = string 4 | } 5 | 6 | variable "availability_zone" { 7 | description = "Availability zone where the instance is created" 8 | type = string 9 | } 10 | 11 | variable "ssh_allowed_ips" { 12 | description = "IP addresses allowed to open SSH connections to the public subnet created by this module" 13 | default = [] 14 | } 15 | 16 | variable "name_prefix" { 17 | description = "A prefix for names of objects created by this module" 18 | default = "sumaform" 19 | } 20 | 21 | variable "private_network" { 22 | description = "network cidr_block (of the form 172.16.x.x/24)" 23 | default = "172.16.1.0/24" 24 | } 25 | 26 | variable "additional_network" { 27 | description = "Additional network cidr_block (of the form 172.16.x.x/24)" 28 | default = "172.16.2.0/24" 29 | } 30 | 31 | variable "create_private_network" { 32 | description = "defines whether a new private network should be created" 33 | default = true 34 | } 35 | 36 | variable "public_subnet_id" { 37 | description = "optional public subnet id" 38 | default = null 39 | } 40 | 41 | variable "create_additional_network" { 42 | description = "defines whether a new additional private network should be created" 43 | default = true 44 | } 45 | 46 | variable "create_network" { 47 | description = "defines whether a new network should be created" 48 | default = true 49 | } 50 | 51 | variable "create_db_network" { 52 | description = "defines whether a new DB network should be created" 53 | default = false 54 | } 55 | 56 | variable "vpc_id" { 57 | description = "ID of the VPC where networks should be created in (optional)" 58 | default = null 59 | } 60 | 61 | variable "route53_domain" { 62 | description = "Domain name for route53" 63 | default = null 64 | } 65 | -------------------------------------------------------------------------------- /backend_modules/aws/network/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 3.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/azure/README_ADVANCED.md: -------------------------------------------------------------------------------- 1 | # Advanced configuration 2 | 3 | ## Re-use of existing network
infrastructure resources 4 | 5 | One can deploy to existing, pre-created infrastructure that follows the pattern defined for the network; see README.md for more information. 6 | To use it, a set of properties must be set on the sumaform base module. 7 | 8 | | Variable name | Type | Default value | Description | 9 | |--------------------------------------|---------|---------------|------------------------------------------------------------------| 10 | | create_network | boolean | `true` | flag indicating whether a new infrastructure should be created | 11 | | public_subnet_id | string | `null` | public subnet id | 12 | | private_subnet_id | string | `null` | private subnet id | 13 | | private_additional_subnet_id | string | `null` | private additional subnet id | 14 | | public_security_group_id | string | `null` | public security group id | 15 | | private_security_group_id | string | `null` | private security group id | 16 | | private_additional_security_group_id | string | `null` | private additional security group id | 17 | | bastion_host | string | `null` | bastion machine hostname (to access machines in private network) | 18 | 19 | Example (a concrete sketch with hypothetical resource IDs also follows the azure network variables file below): 20 | ```hcl 21 | module "base" { 22 | source = "./modules/base" 23 | ... 24 | provider_settings = { 25 | create_network = false 26 | public_subnet_id = ... 27 | private_subnet_id = ... 28 | private_additional_subnet_id = ... 29 | public_security_group_id = ... 30 | private_security_group_id = ... 31 | private_additional_security_group_id = ... 32 | bastion_host = ... 33 | } 34 | } 35 | ``` 36 | -------------------------------------------------------------------------------- /backend_modules/azure/base/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/base/variables.tf -------------------------------------------------------------------------------- /backend_modules/azure/base/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | } 4 | -------------------------------------------------------------------------------- /backend_modules/azure/host/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/host/variables.tf -------------------------------------------------------------------------------- /backend_modules/azure/host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | null = ">= 2.1.0" 5 | azurerm = "~> 2.40" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /backend_modules/azure/network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "location" { 2 | description = "location where the instance is created" 3 | type = string 4 | } 5 | 6 | variable "name_prefix" { 7 | description = "resource name prefix" 8 | type = string 9 | } 10 | 11 | variable "create_network" { 12 | description = "defines whether a new network should be created" 13 | type = bool 14 | default = true 15 | } 16 | 17 | variable "ssh_allowed_ips" { 18 | description = "list of ips allowed to ssh" 19 | default = [] 20 | } 21 | 22 | variable "additional_network" { 23 | description = "Additional network cidr_block (of the form 172.16.x.x/24)" 24 | default = "172.16.2.0/24" 25 | } 26 | --------------------------------------------------------------------------------
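The reuse example in README_ADVANCED.md above elides the actual values. Below is a concrete sketch of the same `provider_settings` block; every resource ID in it is a hypothetical placeholder (the subscription, resource group and resource names are made up), not a value taken from this repository.

```hcl
module "base" {
  source = "./modules/base"
  // other base module arguments as in your main.tf

  provider_settings = {
    create_network = false
    // all IDs below are made-up placeholders following the Azure resource ID format
    public_subnet_id                     = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/public"
    private_subnet_id                    = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/private"
    private_additional_subnet_id         = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Network/virtualNetworks/my-vnet/subnets/private-additional"
    public_security_group_id             = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Network/networkSecurityGroups/public-nsg"
    private_security_group_id            = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Network/networkSecurityGroups/private-nsg"
    private_additional_security_group_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg/providers/Microsoft.Network/networkSecurityGroups/private-additional-nsg"
    bastion_host                         = "bastion.example.com" // hypothetical hostname
  }
}
```

--------------------------------------------------------------------------------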
/backend_modules/azure/network/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | azurerm = "~> 2.40" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /backend_modules/feilong/base/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | name_prefix = var.name_prefix 3 | domain = var.domain 4 | ssh_key_path = var.ssh_key_path 5 | key_file = lookup(var.provider_settings, "key_file", "~/.ssh/id_ed25519") 6 | product_version = var.product_version 7 | } 8 | 9 | output "configuration" { 10 | value = { 11 | name_prefix = local.name_prefix 12 | domain = local.domain 13 | ssh_key_path = local.ssh_key_path 14 | key_file = local.key_file 15 | product_version = local.product_version 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /backend_modules/feilong/base/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/base/variables.tf -------------------------------------------------------------------------------- /backend_modules/feilong/base/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | feilong = { 5 | source = "bischoff/feilong" 6 | version = "0.0.6" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/feilong/host/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/host/variables.tf -------------------------------------------------------------------------------- /backend_modules/feilong/host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | feilong = { 5 | source = "bischoff/feilong" 6 | version = "0.0.6" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/libvirt/base/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/base/variables.tf -------------------------------------------------------------------------------- /backend_modules/libvirt/base/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | libvirt = { 5 | source = "dmacvicar/libvirt" 6 | version = "0.8.1" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/libvirt/host/config.ign: -------------------------------------------------------------------------------- 1 | { 2 | "ignition": { "version": "3.1.0" }, 3 | "passwd": { 4 | "users": [ 5 | { 6 | "name": "root", 7 | "passwordHash": "ZIy6Ivgw8UdAs" 8 | } 9 | ] 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /backend_modules/libvirt/host/cpu_features.xsl: -------------------------------------------------------------------------------- 1 | [XSLT stylesheet markup lost in extraction; of its 34 lines only the text content "host-model" survives, apparently from the template that sets the libvirt domain CPU mode to host-model] 34 |
-------------------------------------------------------------------------------- /backend_modules/libvirt/host/network_config.yaml: -------------------------------------------------------------------------------- 1 | %{ if image == "debian12o" || image == "amazonlinux2023o" } 2 | network: 3 | version: 2 4 | ethernets: 5 | ens3: 6 | dhcp4: true 7 | %{ else } 8 | network: 9 | version: 1 10 | config: 11 | - type: physical 12 | %{ if image == "ubuntu2404o" || image == "ubuntu2204o" || image == "ubuntu2004o" } 13 | name: ens3 14 | %{ else } 15 | name: eth0 16 | %{ endif } 17 | subnets: 18 | %{ if dhcp_dns } 19 | - type: static 20 | address: ${dhcp_dns_address} 21 | %{ else } 22 | - type: dhcp 23 | %{ endif } 24 | %{ endif } 25 | -------------------------------------------------------------------------------- /backend_modules/libvirt/host/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/host/variables.tf -------------------------------------------------------------------------------- /backend_modules/libvirt/host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 2.1.0" 7 | } 8 | libvirt = { 9 | source = "dmacvicar/libvirt" 10 | version = "0.8.1" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /backend_modules/null/README.md: -------------------------------------------------------------------------------- 1 | # null backend 2 | 3 | - Create a symbolic link to the `null` backend module directory inside the `modules` directory: `ln -sfn ../backend_modules/null modules/backend` 4 | 5 | The goal of this backend is to test configurations: any change in the `main.tf` file in use will trigger changes in the `null_resource` objects that stand in for machines (a usage sketch follows the null backend module files below). 6 | 7 | Existing modules: 8 | 9 | - `base` 10 | - `host` 11 | 12 | ## Interface for other backends 13 | 14 | All `variables.tf` and `versions.tf` files present in this backend module are shared with all other backend modules. 15 | The goal is to have a kind of interface that can be used by the modules in the `modules` directory, ensuring compatibility between all backends.
16 | -------------------------------------------------------------------------------- /backend_modules/null/base/main.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "null_resource" "base" { 3 | # Changes to any instance of the cluster require re-provisioning 4 | triggers = { 5 | cc_username = var.cc_username 6 | cc_password = var.cc_password 7 | product_version = var.product_version 8 | timezone = var.timezone 9 | use_ntp = var.use_ntp 10 | ssh_key_path = var.ssh_key_path 11 | mirror = var.mirror 12 | use_mirror_images = var.use_mirror_images 13 | use_avahi = var.use_avahi 14 | domain = var.domain 15 | name_prefix = var.name_prefix 16 | use_shared_resources = var.use_shared_resources 17 | testsuite = var.testsuite 18 | provider_settings = yamlencode(var.provider_settings) 19 | images = yamlencode(var.images) 20 | } 21 | } 22 | 23 | output "configuration" { 24 | value = { 25 | cc_username = var.cc_username 26 | cc_password = var.cc_password 27 | timezone = var.timezone 28 | use_ntp = var.use_ntp 29 | ssh_key_path = var.ssh_key_path 30 | mirror = var.mirror 31 | use_mirror_images = var.use_mirror_images 32 | use_avahi = var.use_avahi 33 | domain = var.domain 34 | name_prefix = var.name_prefix 35 | use_shared_resources = var.use_shared_resources 36 | testsuite = var.testsuite 37 | product_version = var.product_version 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /backend_modules/null/base/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 2.1.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/null/host/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "domain" { 2 | triggers = { 3 | base_configuration = yamlencode(var.base_configuration) 4 | name = var.name 5 | roles = yamlencode(var.roles) 6 | use_os_released_updates = var.use_os_released_updates 7 | install_salt_bundle = var.install_salt_bundle 8 | additional_repos = yamlencode(var.additional_repos) 9 | additional_repos_only = var.additional_repos_only 10 | additional_certs = yamlencode(var.additional_certs) 11 | additional_packages = yamlencode(var.additional_packages) 12 | quantity = var.quantity 13 | grains = yamlencode(var.grains) 14 | swap_file_size = var.swap_file_size 15 | ssh_key_path = var.ssh_key_path 16 | gpg_keys = yamlencode(var.gpg_keys) 17 | ipv6 = yamlencode(var.ipv6) 18 | connect_to_base_network = var.connect_to_base_network 19 | connect_to_additional_network = var.connect_to_additional_network 20 | image = var.image 21 | provider_settings = yamlencode(var.provider_settings) 22 | main_disk_size = var.main_disk_size 23 | } 24 | } 25 | 26 | output "configuration" { 27 | value = { 28 | ids = ["1"] 29 | hostnames = ["domain"] 30 | macaddrs = [] 31 | public_names = [] 32 | ipaddrs = [] 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /backend_modules/null/host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 2.1.0" 7 | } 8 | } 9 | } 10 |
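The null backend README above gives only the symlink command, so here is a minimal, hypothetical `main.tf` for a null-backend dry run. Module names and values mirror main.tf.libvirt.example; `server.example.com` is a placeholder, since the null backend creates no real server.

```hcl
// Assumes the symlink from the README: ln -sfn ../backend_modules/null modules/backend
module "base" {
  source = "./modules/base"

  product_version = "uyuni-master"
  images          = [] // the null backend downloads no images
}

module "minion" {
  source             = "./modules/minion"
  base_configuration = module.base.configuration

  name  = "minion"
  image = "opensuse155o"
  // hypothetical server reference; there is no VM behind it
  server_configuration = { hostname = "server.example.com" }
}
```

Running `terraform plan` after editing any of these values shows which `null_resource` triggers would force re-provisioning, which is exactly the compatibility contract the "Interface" section above describes.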
-------------------------------------------------------------------------------- /backend_modules/ssh/base/main.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "null_resource" "base" { 3 | triggers = { 4 | cc_username = var.cc_username 5 | cc_password = var.cc_password 6 | timezone = var.timezone 7 | use_ntp = var.use_ntp 8 | ssh_key_path = var.ssh_key_path 9 | mirror = var.mirror 10 | use_mirror_images = var.use_mirror_images 11 | use_avahi = var.use_avahi 12 | domain = var.domain 13 | name_prefix = var.name_prefix 14 | use_shared_resources = var.use_shared_resources 15 | testsuite = var.testsuite 16 | provider_settings = yamlencode(var.provider_settings) 17 | } 18 | } 19 | 20 | output "configuration" { 21 | value = { 22 | cc_username = var.cc_username 23 | cc_password = var.cc_password 24 | timezone = var.timezone 25 | use_ntp = var.use_ntp 26 | ssh_key_path = var.ssh_key_path 27 | mirror = var.mirror 28 | use_mirror_images = var.use_mirror_images 29 | use_avahi = var.use_avahi 30 | domain = var.domain 31 | name_prefix = var.name_prefix 32 | use_shared_resources = var.use_shared_resources 33 | testsuite = var.testsuite 34 | 35 | additional_network = lookup(var.provider_settings, "additional_network", null) 36 | 37 | // only supported in ssh connection 38 | private_key = lookup(var.provider_settings, "private_key", null) 39 | certificate = lookup(var.provider_settings, "certificate", null) 40 | host_key = lookup(var.provider_settings, "host_key", null) 41 | 42 | bastion_host = lookup(var.provider_settings, "bastion_host", null) 43 | bastion_host_key = lookup(var.provider_settings, "bastion_host_key", null) 44 | bastion_port = lookup(var.provider_settings, "bastion_port", null) 45 | bastion_user = lookup(var.provider_settings, "bastion_user", null) 46 | bastion_password = lookup(var.provider_settings, "bastion_password", null) 47 | bastion_private_key = lookup(var.provider_settings, "bastion_private_key", null) 48 | bastion_certificate = lookup(var.provider_settings, "bastion_certificate", null) 49 | timeout = lookup(var.provider_settings, "timeout", "20s") 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /backend_modules/ssh/base/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/base/variables.tf -------------------------------------------------------------------------------- /backend_modules/ssh/base/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 2.1.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /backend_modules/ssh/host/variables.tf: -------------------------------------------------------------------------------- 1 | ../../null/host/variables.tf -------------------------------------------------------------------------------- /backend_modules/ssh/host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | null = { 5 | source = "hashicorp/null" 6 | version = ">= 2.1.0" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /configure_aws_tunnels.rb: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # encoding: UTF-8 3 | 4 | require 'json' 5 | 6 | output = JSON.parse(`terraform output -json`) 7 | 8 | bastion_public_name = output["bastion_public_name"]["value"] 9 | 10 | single_instances = output 11 | .map do |name, value| 12 | if name =~ /aws_(.*)_private_name$/ 13 | { symbolic_name: $1, private_name: value["value"] } 14 | end 15 | end 16 | .compact 17 | 18 | multiple_instances = output 19 | .map do |name, value| 20 | if name =~ /aws_(.*)_private_names$/ 21 | value["value"].each_with_index.map do |name, index| 22 | { symbolic_name: "#{$1}-#{index}", private_name: name } 23 | end 24 | end 25 | end 26 | .compact 27 | .flatten 28 | 29 | instances = single_instances + multiple_instances 30 | key_file = output["key_file"]["value"] 31 | 32 | tunnel_string = <<-eos 33 | # sumaform configuration start 34 | Host bastion 35 | HostName #{bastion_public_name} 36 | StrictHostKeyChecking no 37 | User ec2-user 38 | IdentityFile #{key_file} 39 | ServerAliveInterval 120 40 | eos 41 | 42 | instances.each do |instance| 43 | tunnel_string += <<-eos 44 | 45 | Host #{instance[:symbolic_name]} 46 | HostName #{instance[:private_name]} 47 | StrictHostKeyChecking no 48 | User ec2-user 49 | IdentityFile #{key_file} 50 | ProxyCommand ssh ec2-user@bastion -W %h:%p 51 | ServerAliveInterval 120 52 | eos 53 | if instance[:symbolic_name] =~ /suma/ 54 | tunnel_string += " LocalForward 8043 127.0.0.1:443\n" 55 | end 56 | if instance[:symbolic_name] =~ /grafana/ 57 | tunnel_string += " LocalForward 8080 127.0.0.1:8080\n" 58 | tunnel_string += " LocalForward 9090 127.0.0.1:9090\n" 59 | end 60 | end 61 | 62 | tunnel_string += "# sumaform configuration end" 63 | 64 | config_path = "#{Dir.home}/.ssh/config" 65 | config_string = File.read(config_path) 66 | 67 | if config_string =~ /(.*)^# sumaform configuration start$(.*)^# sumaform configuration end$(.*)/m 68 | File.write(config_path, "#{$1}#{tunnel_string}#{$3}") 69 | else 70 | File.write(config_path, "#{config_string}\n#{tunnel_string}\n") 71 | end 72 | -------------------------------------------------------------------------------- /help/data-pool-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/uyuni-project/sumaform/e5158b05d93232bb2c14f2863c4581fa2c1bf1e1/help/data-pool-configuration.png -------------------------------------------------------------------------------- /help/sumaform-icon-black.svg: -------------------------------------------------------------------------------- 1 | [SVG markup lost in extraction] 26 | -------------------------------------------------------------------------------- /main.tf.libvirt.example: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | libvirt = { 5 | source = "dmacvicar/libvirt" 6 | version = "0.8.1" 7 | } 8 | } 9 | } 10 | 11 | provider "libvirt" { 12 | uri = "qemu:///system" 13 | } 14 | 15 | module "base" { 16 | source = "./modules/base" 17 | 18 | // Set SUSE Customer Center credentials here if you have some 19 | // cc_username = "" 20 | // cc_password = "" 21 | product_version = "uyuni-master" 22 | 23 | // optional parameters with defaults below 24 | // use_avahi = true 25 | // name_prefix = "" // if you use name_prefix, make sure to update the server_configuration for clients/minions
below 26 | // timezone = "Europe/Berlin" 27 | // use_ntp = true 28 | 29 | // provider_settings = { 30 | // network_name = "default" // change to null if you add a bridge below 31 | // additional_network = null 32 | // bridge = null 33 | // pool = "default" 34 | // } 35 | 36 | images = ["opensuse155o", "leapmicro55o"] 37 | } 38 | 39 | module "server" { 40 | source = "./modules/server_containerized" 41 | base_configuration = module.base.configuration 42 | 43 | name = "server" 44 | // see modules/server_containerized/variables.tf for possible values 45 | 46 | // connect_to_additional_network = true 47 | // if you want to use two networks 48 | 49 | // If you want to run the containers on k3s rather than podman 50 | // runtime = "k3s" 51 | 52 | // To override the default container repository containing the images 53 | // container_repository = "registry.opensuse.org/systemsmanagement/uyuni/master/servercontainer/containers/uyuni" 54 | 55 | // To define a specific container image tag to install 56 | // container_tag = 57 | } 58 | 59 | module "minion" { 60 | source = "./modules/minion" 61 | base_configuration = module.base.configuration 62 | 63 | name = "minion" 64 | image = "opensuse155o" 65 | server_configuration = module.server.configuration 66 | // see modules/minion/variables.tf for possible values 67 | } 68 | -------------------------------------------------------------------------------- /main.tf.libvirt.example.Manager-43: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | libvirt = { 5 | source = "dmacvicar/libvirt" 6 | version = "0.8.1" 7 | } 8 | } 9 | } 10 | 11 | provider "libvirt" { 12 | uri = "qemu:///system" 13 | } 14 | 15 | module "base" { 16 | source = "./modules/base" 17 | 18 | // Set SUSE Customer Center credentials here if you have some 19 | // cc_username = "" 20 | // cc_password = "" 21 | product_version = "4.3-nightly" 22 | 23 | // optional parameters with defaults below 24 | // use_avahi = true 25 | // name_prefix = "" // if you use name_prefix, make sure to update the server_configuration for clients/minions below 26 | // timezone = "Europe/Berlin" 27 | // use_ntp = true 28 | 29 | // provider_settings = { 30 | // network_name = "default" // change to null if you add a bridge below 31 | // additional_network = null 32 | // bridge = null 33 | // pool = "default" 34 | // } 35 | 36 | images = ["sles15sp4o"] 37 | } 38 | 39 | module "server" { 40 | source = "./modules/server" 41 | base_configuration = module.base.configuration 42 | 43 | name = "server" 44 | // see modules/server/variables.tf for possible values 45 | 46 | // connect_to_additional_network = true 47 | // if you want to use two networks 48 | } 49 | 50 | module "minion" { 51 | source = "./modules/minion" 52 | base_configuration = module.base.configuration 53 | 54 | name = "minion" 55 | image = "sles15sp4o" 56 | server_configuration = module.server.configuration 57 | // see modules/minion/variables.tf for possible values 58 | } 59 | -------------------------------------------------------------------------------- /main.tf.ssh.example: -------------------------------------------------------------------------------- 1 | module "base" { 2 | source = "./modules/base" 3 | 4 | cc_username = "UC7" 5 | cc_password = ...
6 | product_version = "4.3-nightly" 7 | 8 | // optional parameters with defaults below 9 | // use_avahi = true 10 | // name_prefix = "" // if you use name_prefix, make sure to update the server_configuration for clients/minions below 11 | // timezone = "Europe/Berlin" 12 | // use_ntp = true 13 | 14 | // provider_settings = { 15 | // additional_network = null 16 | // } 17 | } 18 | 19 | module "server" { 20 | source = "./modules/server" 21 | base_configuration = module.base.configuration 22 | 23 | name = "server" 24 | // see modules/server/variables.tf for possible values 25 | 26 | // connect_to_additional_network = true 27 | // if you want to use two networks 28 | 29 | provider_settings = { 30 | host = "192.168.1.1" 31 | } 32 | } 33 | 34 | module "client" { 35 | source = "./modules/client" 36 | base_configuration = module.base.configuration 37 | 38 | name = "client" 39 | image = "sles15sp4o" 40 | server_configuration = module.server.configuration 41 | // see modules/client/variables.tf for possible values 42 | 43 | provider_settings = { 44 | host = "192.168.1.2" 45 | } 46 | } 47 | 48 | module "minion" { 49 | source = "./modules/minion" 50 | base_configuration = module.base.configuration 51 | 52 | name = "minion" 53 | image = "sles15sp4o" 54 | server_configuration = module.server.configuration 55 | // see modules/minion/variables.tf for possible values 56 | 57 | provider_settings = { 58 | host = "192.168.1.3" 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /modules/base/main.tf: -------------------------------------------------------------------------------- 1 | module "base_backend" { 2 | source = "../backend/base" 3 | 4 | cc_username = var.cc_username 5 | cc_password = var.cc_password 6 | product_version = var.product_version 7 | timezone = var.timezone 8 | use_ntp = var.use_ntp 9 | ssh_key_path = var.ssh_key_path 10 | mirror = var.mirror 11 | use_mirror_images = var.use_mirror_images 12 | use_avahi = var.use_avahi 13 | domain = var.domain 14 | name_prefix = var.name_prefix 15 | use_shared_resources = var.use_shared_resources 16 | testsuite = var.testsuite 17 | provider_settings = var.provider_settings 18 | images = var.images 19 | use_eip_bastion = var.use_eip_bastion 20 | is_server_paygo_instance = var.is_server_paygo_instance 21 | } 22 | 23 | output "configuration" { 24 | value = merge({ 25 | cc_username = var.cc_username 26 | cc_password = var.cc_password 27 | product_version = var.product_version 28 | timezone = var.timezone 29 | use_ntp = var.use_ntp 30 | ssh_key_path = var.ssh_key_path 31 | mirror = var.mirror 32 | use_mirror_images = var.use_mirror_images 33 | use_avahi = var.use_avahi 34 | domain = var.domain 35 | name_prefix = var.name_prefix 36 | use_shared_resources = var.use_shared_resources 37 | testsuite = var.testsuite 38 | use_eip_bastion = var.use_eip_bastion 39 | # WORKAROUND 40 | # For some reason, the key "additional_network" from AWS module gets lost 41 | # Force it into existence 42 | additional_network = null 43 | # END OF WORKAROUND 44 | }, module.base_backend.configuration) 45 | } 46 | -------------------------------------------------------------------------------- /modules/base/variables.tf: -------------------------------------------------------------------------------- 1 | ../../backend_modules/null/base/variables.tf -------------------------------------------------------------------------------- /modules/base/versions.tf: -------------------------------------------------------------------------------- 1
| ../backend/base/versions.tf -------------------------------------------------------------------------------- /modules/build_host/main.tf: -------------------------------------------------------------------------------- 1 | module "build_host" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | quantity = var.quantity 7 | use_os_released_updates = var.use_os_released_updates 8 | install_salt_bundle = var.install_salt_bundle 9 | additional_repos = var.additional_repos 10 | additional_repos_only = var.additional_repos_only 11 | additional_packages = var.additional_packages 12 | gpg_keys = var.gpg_keys 13 | swap_file_size = var.swap_file_size 14 | ssh_key_path = var.ssh_key_path 15 | ipv6 = var.ipv6 16 | connect_to_base_network = true 17 | connect_to_additional_network = true 18 | roles = ["build_host"] 19 | disable_firewall = var.disable_firewall 20 | product_version = var.product_version 21 | grains = { 22 | mirror = var.base_configuration["mirror"] 23 | server = var.server_configuration["hostname"] 24 | auto_connect_to_master = var.auto_connect_to_master 25 | avahi_reflector = var.avahi_reflector 26 | sles_registration_code = var.sles_registration_code 27 | } 28 | 29 | image = var.image 30 | provider_settings = var.provider_settings 31 | } 32 | 33 | output "configuration" { 34 | value = module.build_host.configuration 35 | } 36 | -------------------------------------------------------------------------------- /modules/build_host/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/client/main.tf: -------------------------------------------------------------------------------- 1 | module "client" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | quantity = var.quantity 7 | use_os_released_updates = var.use_os_released_updates 8 | install_salt_bundle = var.install_salt_bundle 9 | additional_repos = var.additional_repos 10 | additional_repos_only = var.additional_repos_only 11 | additional_packages = var.additional_packages 12 | gpg_keys = var.gpg_keys 13 | swap_file_size = var.swap_file_size 14 | ssh_key_path = var.ssh_key_path 15 | ipv6 = var.ipv6 16 | connect_to_base_network = true 17 | connect_to_additional_network = false 18 | roles = ["client"] 19 | disable_firewall = var.disable_firewall 20 | product_version = var.product_version 21 | grains = { 22 | mirror = var.base_configuration["mirror"] 23 | server = var.server_configuration["hostname"] 24 | auto_register = var.auto_register 25 | sles_registration_code = var.sles_registration_code 26 | } 27 | 28 | image = var.image 29 | provider_settings = var.provider_settings 30 | } 31 | 32 | output "configuration" { 33 | value = module.client.configuration 34 | } 35 | 36 | -------------------------------------------------------------------------------- /modules/client/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/controller/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/cucumber_testsuite/versions.tf: 
-------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/dhcp_dns/README.md: -------------------------------------------------------------------------------- 1 | This module creates a VM that hosts a DHCP and a DNS server. 2 | 3 | It is meant to be used on the private network, together with the proxy 4 | (5.0 and onwards), the PXE boot minions, and the hypervisor. 5 | 6 | Since it is on the private network, the DHCP and DNS server has no access 7 | to package repositories, nor to the Internet in general, so it is prepared 8 | directly from the hypervisor. As a consequence, the Jenkins workers must 9 | drop the public SSH key of their "jenkins" user into the authorized_keys 10 | file on the hypervisor; if you are testing manually, drop your own key instead. 11 | -------------------------------------------------------------------------------- /modules/dhcp_dns/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "name" { 6 | description = "hostname, without the domain part" 7 | type = string 8 | } 9 | 10 | variable "quantity" { 11 | description = "number of hosts like this one" 12 | default = 1 13 | } 14 | 15 | variable "private_hosts" { 16 | description = "configuration of the various hosts in the private network" 17 | type = list(object({ 18 | private_mac = string 19 | private_ip = number 20 | private_name = string 21 | })) 22 | } 23 | 24 | variable "image" { 25 | description = "an image name, e.g. sles12sp4 or opensuse155o" 26 | type = string 27 | default = "opensuse155o" 28 | } 29 | 30 | variable "hypervisor" { 31 | description = "the hypervisor where the DHCP and DNS VM runs" 32 | type = object({ 33 | host = string 34 | user = string 35 | private_key = string 36 | }) 37 | default = null 38 | } 39 | -------------------------------------------------------------------------------- /modules/dhcp_dns/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf --------------------------------------------------------------------------------
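Given the variables above, a top-level configuration could wire the module roughly as follows. This is a hedged sketch with hypothetical names: the private_hosts entry reuses a pxe_boot module's configuration output, which exposes the private_mac, private_ip and private_name attributes the object type requires (any extra attributes should be discarded by Terraform's type conversion), and the hypervisor values are placeholders to adapt to your setup.

```hcl
module "dhcp_dns" {
  source             = "./modules/dhcp_dns"
  base_configuration = module.base.configuration

  name = "dhcp-dns"

  # One entry per host on the private network, e.g. a PXE boot minion
  private_hosts = [module.pxe_boot.configuration]

  # The hypervisor this VM is prepared from; host, user and key are placeholders
  hypervisor = {
    host        = "hypervisor.example.org"
    user        = "jenkins"
    private_key = file("~/.ssh/id_ed25519")
  }
}
```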
/modules/grafana/main.tf: -------------------------------------------------------------------------------- 1 | module "grafana" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | quantity = var.quantity 7 | ssh_key_path = var.ssh_key_path 8 | roles = ["grafana"] 9 | grains = { 10 | mirror = var.base_configuration["mirror"] 11 | server = var.server_configuration["hostname"] 12 | locust = var.locust_configuration["hostname"] 13 | product_version = "4.3-nightly" 14 | } 15 | 16 | image = "sles15sp4o" 17 | provider_settings = var.provider_settings 18 | } 19 | 20 | output "configuration" { 21 | value = module.grafana.configuration 22 | } 23 | 24 | -------------------------------------------------------------------------------- /modules/grafana/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "name" { 6 | description = "hostname, without the domain part" 7 | default = "grafana" 8 | } 9 | 10 | variable "quantity" { 11 | description = "number of hosts like this one" 12 | default = 1 13 | } 14 | 15 | variable "server_configuration" { 16 | description = "use module.<server>.configuration, see the main.tf example file" 17 | } 18 | 19 | variable "locust_configuration" { 20 | description = "use module.<locust>.configuration, see README_ADVANCED.md" 21 | default = { 22 | "hostname" = "none" 23 | } 24 | } 25 | 26 | variable "ssh_key_path" { 27 | description = "path of additional pub ssh key you want to use to access VMs, see README_ADVANCED.md" 28 | default = null 29 | } 30 | 31 | variable "provider_settings" { 32 | description = "Map of provider-specific settings, see the backend-specific README file" 33 | default = {} 34 | } 35 | -------------------------------------------------------------------------------- /modules/grafana/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/host/main.tf: -------------------------------------------------------------------------------- 1 | 2 | module "host" { 3 | source = "../backend/host" 4 | 5 | base_configuration = var.base_configuration 6 | name = var.name 7 | roles = var.roles 8 | use_os_released_updates = var.use_os_released_updates 9 | install_salt_bundle = var.install_salt_bundle 10 | additional_repos = var.additional_repos 11 | additional_repos_only = var.additional_repos_only 12 | additional_certs = var.additional_certs 13 | additional_packages = var.additional_packages 14 | quantity = var.quantity 15 | swap_file_size = var.swap_file_size 16 | ssh_key_path = var.ssh_key_path 17 | gpg_keys = var.gpg_keys 18 | ipv6 = var.ipv6 19 | connect_to_base_network = var.connect_to_base_network 20 | connect_to_additional_network = var.connect_to_additional_network 21 | image = var.image 22 | provision = var.provision 23 | provider_settings = var.provider_settings 24 | main_disk_size = var.main_disk_size 25 | additional_disk_size = var.additional_disk_size 26 | second_additional_disk_size = var.second_additional_disk_size 27 | volume_provider_settings = var.volume_provider_settings 28 | product_version = var.product_version 29 | 30 | grains = merge({ disable_firewall = var.disable_firewall }, 31 | var.grains) 32 | } 33 | 34 | output "configuration" { 35 | value = merge( { private_macs = [] }, module.host.configuration) 36 | } 37 | -------------------------------------------------------------------------------- /modules/host/variables.tf: -------------------------------------------------------------------------------- 1 | ../../backend_modules/null/host/variables.tf -------------------------------------------------------------------------------- /modules/host/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/jenkins/main.tf: -------------------------------------------------------------------------------- 1 | 2 | module "jenkins" { 3 | source = "../host" 4 | 5 | base_configuration = var.base_configuration 6 | name = "jenkins" 7 | use_os_released_updates = var.use_os_released_updates 8 | additional_repos = var.additional_repos 9 | additional_repos_only = var.additional_repos_only 10 | additional_packages = var.additional_packages 11 | swap_file_size = var.swap_file_size 12 | ssh_key_path = var.ssh_key_path 13 | roles = ["jenkins"] 14 | 15 | grains = { 16 | mirror = var.base_configuration["mirror"] 17 |
data_disk_fstype = var.data_disk_fstype 18 | } 19 | 20 | image = var.image 21 | 22 | provider_settings = var.provider_settings 23 | additional_disk_size = var.data_disk_size 24 | volume_provider_settings = var.volume_provider_settings 25 | } 26 | 27 | output "configuration" { 28 | value = module.jenkins.configuration 29 | } 30 | -------------------------------------------------------------------------------- /modules/jenkins/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "use_os_released_updates" { 6 | description = "Apply all updates from SUSE Linux Enterprise repos" 7 | default = true 8 | } 9 | 10 | variable "additional_repos" { 11 | description = "extra repositories in the form {label = url}, see README_ADVANCED.md" 12 | default = {} 13 | } 14 | 15 | variable "additional_repos_only" { 16 | description = "whether to exclusively use additional repos" 17 | default = false 18 | } 19 | 20 | variable "additional_packages" { 21 | description = "extra packages to install, see README_ADVANCED.md" 22 | default = [] 23 | } 24 | 25 | variable "swap_file_size" { 26 | description = "Swap file size in MiB, or 0 for none" 27 | default = 0 28 | } 29 | 30 | variable "ssh_key_path" { 31 | description = "path of additional pub ssh key you want to use to access VMs, see README_ADVANCED.md" 32 | default = null 33 | } 34 | 35 | variable "provider_settings" { 36 | description = "Map of provider-specific settings, see the backend-specific README file" 37 | default = {} 38 | } 39 | 40 | variable "data_disk_size" { 41 | description = "Size of an additional disk for the /var/lib/jenkins partition, defined in GiB" 42 | default = 1024 43 | } 44 | 45 | variable "data_disk_fstype" { 46 | description = "Data disk file system type" 47 | default = "ext4" 48 | } 49 | 50 | variable "volume_provider_settings" { 51 | description = "Map of volume-provider-specific settings, see the backend-specific README file" 52 | default = {} 53 | } 54 | 55 | variable "image" { 56 | description = "An image name, e.g.
sles12sp4 or opensuse155o" 57 | type = string 58 | default = "opensuse155o" 59 | } 60 | -------------------------------------------------------------------------------- /modules/jenkins/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/locust/main.tf: -------------------------------------------------------------------------------- 1 | module "locust" { 2 | source = "../host" 3 | base_configuration = var.base_configuration 4 | name = var.name 5 | ssh_key_path = var.ssh_key_path 6 | roles = ["locust"] 7 | grains = { 8 | mirror = var.base_configuration["mirror"] 9 | server = var.server_configuration["hostname"] 10 | locust_file = base64encode(file(var.locust_file)) 11 | server_username = var.server_configuration["username"] 12 | server_password = var.server_configuration["password"] 13 | locust_master_host = null 14 | locust_slave_count = var.slave_quantity 15 | } 16 | 17 | image = "opensuse154" 18 | provider_settings = var.provider_settings 19 | } 20 | 21 | module "locust-slave" { 22 | source = "../host" 23 | base_configuration = var.base_configuration 24 | name = "${var.name}-slave" 25 | quantity = var.slave_quantity 26 | ssh_key_path = var.ssh_key_path 27 | roles = ["locust"] 28 | grains = { 29 | mirror = var.base_configuration["mirror"] 30 | server = var.server_configuration["hostname"] 31 | locust_file = base64encode(file(var.locust_file)) 32 | server_username = var.server_configuration["username"] 33 | server_password = var.server_configuration["password"] 34 | locust_master_host = length(module.locust.configuration["hostnames"]) > 0 ? module.locust.configuration["hostnames"][0] : null 35 | } 36 | 37 | image = "opensuse154" 38 | provider_settings = var.provider_settings 39 | } 40 | 41 | output "configuration" { 42 | value = { 43 | id = length(module.locust.configuration["ids"]) > 0 ? module.locust.configuration["ids"][0] : null 44 | hostname = length(module.locust.configuration["hostnames"]) > 0 ? 
module.locust.configuration["hostnames"][0] : null 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /modules/locust/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "server_configuration" { 6 | description = "use module.<server>.configuration, see the main.tf example file" 7 | } 8 | 9 | variable "name" { 10 | description = "hostname, without the domain part" 11 | default = "locust" 12 | } 13 | 14 | variable "ssh_key_path" { 15 | description = "path of additional pub ssh key you want to use to access VMs, see README_ADVANCED.md" 16 | default = null 17 | } 18 | 19 | variable "locust_file" { 20 | description = "path to a locustfile.py" 21 | default = "salt/locust/locustfile.py" 22 | } 23 | 24 | variable "slave_quantity" { 25 | description = "number of Locust slaves, set to 0 to disable master-slave mode" 26 | default = 0 27 | } 28 | 29 | variable "provider_settings" { 30 | description = "Map of provider-specific settings, see the backend-specific README file" 31 | default = {} 32 | } 33 | -------------------------------------------------------------------------------- /modules/locust/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/minion/main.tf: -------------------------------------------------------------------------------- 1 | module "minion" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | quantity = var.quantity 7 | use_os_released_updates = var.use_os_released_updates 8 | install_salt_bundle = var.install_salt_bundle 9 | additional_repos = var.additional_repos 10 | additional_repos_only = var.additional_repos_only 11 | additional_packages = var.additional_packages 12 | gpg_keys = var.gpg_keys 13 | swap_file_size = var.swap_file_size 14 | ssh_key_path = var.ssh_key_path 15 | ipv6 = var.ipv6 16 | connect_to_base_network = true 17 | connect_to_additional_network = false 18 | roles = var.roles 19 | disable_firewall = var.disable_firewall 20 | product_version = var.product_version 21 | grains = merge({ 22 | mirror = var.base_configuration["mirror"] 23 | server = var.auto_connect_to_master ?
var.server_configuration["hostname"] : null 24 | auto_connect_to_master = var.auto_connect_to_master 25 | avahi_reflector = var.avahi_reflector 26 | susemanager = { 27 | activation_key : var.activation_key 28 | } 29 | evil_minion_count = var.evil_minion_count 30 | evil_minion_slowdown_factor = var.evil_minion_slowdown_factor 31 | sles_registration_code = var.sles_registration_code 32 | }, var.additional_grains) 33 | 34 | image = var.image 35 | provider_settings = var.provider_settings 36 | } 37 | 38 | output "configuration" { 39 | value = module.minion.configuration 40 | } 41 | -------------------------------------------------------------------------------- /modules/minion/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/mirror/main.tf: -------------------------------------------------------------------------------- 1 | 2 | module "mirror" { 3 | source = "../host" 4 | 5 | base_configuration = var.base_configuration 6 | name = "mirror" 7 | use_os_released_updates = var.use_os_released_updates 8 | install_salt_bundle = var.install_salt_bundle 9 | additional_repos = var.additional_repos 10 | additional_repos_only = var.additional_repos_only 11 | additional_packages = var.additional_packages 12 | swap_file_size = var.swap_file_size 13 | ssh_key_path = var.ssh_key_path 14 | roles = ["mirror"] 15 | 16 | grains = { 17 | cc_username = var.base_configuration["cc_username"] 18 | cc_password = var.base_configuration["cc_password"] 19 | ubuntu_distros = var.ubuntu_distros 20 | use_mirror_images = var.base_configuration["use_mirror_images"] 21 | data_disk_fstype = var.data_disk_fstype 22 | customize_minima_file = var.customize_minima_file 23 | synchronize_immediately = var.synchronize_immediately 24 | disable_cron = var.disable_cron 25 | } 26 | 27 | image = var.image 28 | 29 | provider_settings = var.provider_settings 30 | additional_disk_size = var.repository_disk_size 31 | volume_provider_settings = var.volume_provider_settings 32 | } 33 | 34 | output "configuration" { 35 | value = module.mirror.configuration 36 | } 37 | -------------------------------------------------------------------------------- /modules/mirror/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/proxy/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/proxy_containerized/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/pxe_boot/main.tf: -------------------------------------------------------------------------------- 1 | module "pxe_boot" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | quantity = var.quantity 7 | image = var.image 8 | 9 | connect_to_base_network = false 10 | connect_to_additional_network = true 11 | roles = ["pxe_boot"] 12 | provision = false 13 | 14 | provider_settings = var.provider_settings 15 | } 16 | 17 | output "configuration" { 18 | value = { 19 | id = length(module.pxe_boot.configuration["ids"]) > 0 ? 
module.pxe_boot.configuration["ids"][0] : null 20 | hostname = length(module.pxe_boot.configuration["hostnames"]) > 0 ? module.pxe_boot.configuration["hostnames"][0] : null 21 | private_mac = length(module.pxe_boot.configuration["macaddrs"]) > 0 ? module.pxe_boot.configuration["macaddrs"][0] : null 22 | private_ip = var.private_ip 23 | private_name = var.private_name 24 | image = var.image 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /modules/pxe_boot/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "name" { 6 | description = "hostname, without the domain part" 7 | type = string 8 | } 9 | 10 | variable "quantity" { 11 | description = "number of hosts like this one" 12 | default = 1 13 | } 14 | 15 | variable "private_ip" { 16 | description = "last digit of IP address in private network" 17 | type = number 18 | } 19 | 20 | variable "private_name" { 21 | description = "hostname inside the private network" 22 | type = string 23 | } 24 | 25 | variable "image" { 26 | description = "an image name, e.g. sles12sp4 or opensuse155o" 27 | type = string 28 | } 29 | 30 | variable "provider_settings" { 31 | description = "settings specific to the provider" 32 | default = {} 33 | } 34 | -------------------------------------------------------------------------------- /modules/pxe_boot/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/rds/main.tf: -------------------------------------------------------------------------------- 1 | module "rds" { 2 | source = "../backend/db_host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | connect_to_base_network = true 7 | connect_to_additional_network = false 8 | 9 | db_username = var.db_username 10 | db_password = var.db_password 11 | engine = var.engine 12 | engine_version = var.engine_version 13 | provider_settings = var.provider_settings 14 | 15 | publicly_accessible = var.publicly_accessible 16 | skip_final_snapshot = var.skip_final_snapshot 17 | } 18 | 19 | output "configuration" { 20 | value = { 21 | id = length(module.rds.configuration["ids"]) > 0 ? module.rds.configuration["ids"][0] : null 22 | hostname = length(module.rds.configuration["hostnames"]) > 0 ? module.rds.configuration["hostnames"][0] : null 23 | superuser = length(module.rds.configuration["username"]) > 0 ? module.rds.configuration["username"][0] : null 24 | superuser_password = var.db_password 25 | port = length(module.rds.configuration["port"]) > 0 ? 
module.rds.configuration["port"][0] : null 26 | certificate = var.db_certificate 27 | local = false 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /modules/rds/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "name" { 6 | description = "hostname, without the domain part" 7 | type = string 8 | default = "rds" 9 | } 10 | 11 | variable "quantity" { 12 | description = "number of hosts like this one" 13 | default = 1 14 | } 15 | 16 | variable "provider_settings" { 17 | description = "Map of provider-specific settings, see the backend-specific README file" 18 | default = {} 19 | } 20 | 21 | variable "publicly_accessible" { 22 | description = "true if you want the RDS instance to have a public address" 23 | type = bool 24 | default = false 25 | } 26 | 27 | variable "skip_final_snapshot" { 28 | description = "Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created." 29 | type = bool 30 | default = true 31 | } 32 | 33 | variable "engine" { 34 | description = "RDS engine, postgres by default" 35 | default = "postgres" 36 | } 37 | 38 | variable "engine_version" { 39 | description = "RDS engine version" 40 | default = "14.3" 41 | } 42 | 43 | 44 | variable "db_username" { 45 | description = "RDS root user name" 46 | default = "postgres" 47 | } 48 | 49 | variable "db_password" { 50 | description = "RDS root user password" 51 | sensitive = true 52 | default = "spacewalk" 53 | } 54 | 55 | variable "db_certificate" { 56 | description = "Certificate needed to connect to a remote AWS database. https://lightsail.aws.amazon.com/ls/docs/en_us/articles/amazon-lightsail-download-ssl-certificate-for-managed-database" 57 | default = "/root/aws.crt" 58 | } 59 | -------------------------------------------------------------------------------- /modules/rds/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "1.0.10" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 3.0" 7 | } 8 | } 9 | } 10 | --------------------------------------------------------------------------------
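Since this module is AWS-only (note the pinned hashicorp/aws provider above), a top-level usage could look like the following sketch. All inputs shown are variables declared above; the password value is illustrative and should replace the insecure "spacewalk" default in real deployments.

```hcl
module "rds" {
  source             = "./modules/rds"
  base_configuration = module.base.configuration

  name           = "rds"
  engine         = "postgres"           # default engine from variables.tf
  engine_version = "14.3"
  db_username    = "postgres"
  db_password    = "a-strong-password"  # illustrative; defaults to "spacewalk"

  publicly_accessible = false
  skip_final_snapshot = true
}

# module.rds.configuration then exposes hostname, port, superuser and
# superuser_password (see the output block above) for consumers that
# expect an external database.
```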
/modules/registry/main.tf: -------------------------------------------------------------------------------- 1 | module "registry" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | quantity = var.quantity 7 | use_os_released_updates = var.use_os_released_updates 8 | additional_repos = var.additional_repos 9 | additional_repos_only = var.additional_repos_only 10 | additional_packages = var.additional_packages 11 | swap_file_size = var.swap_file_size 12 | ssh_key_path = var.ssh_key_path 13 | gpg_keys = var.gpg_keys 14 | ipv6 = var.ipv6 15 | connect_to_base_network = var.connect_to_base_network 16 | connect_to_additional_network = var.connect_to_additional_network 17 | roles = ["registry"] 18 | disable_firewall = var.disable_firewall 19 | grains = { 20 | mirror = var.base_configuration["mirror"] 21 | } 22 | 23 | image = "opensuse155o" 24 | provider_settings = var.provider_settings 25 | } 26 | 27 | output "configuration" { 28 | value = { 29 | id = length(module.registry.configuration["ids"]) > 0 ? module.registry.configuration["ids"][0] : null 30 | hostname = length(module.registry.configuration["hostnames"]) > 0 ? module.registry.configuration["hostnames"][0] : null 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /modules/registry/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/salt_testenv/main.tf: -------------------------------------------------------------------------------- 1 | module "salt_testenv" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | use_os_released_updates = var.use_os_released_updates 6 | name = var.name 7 | additional_repos = var.additional_repos 8 | additional_repos_only = var.additional_repos_only 9 | additional_packages = var.additional_packages 10 | swap_file_size = var.swap_file_size 11 | ssh_key_path = var.ssh_key_path 12 | image = var.image 13 | provider_settings = var.provider_settings 14 | install_salt_bundle = var.install_salt_bundle 15 | 16 | grains = { 17 | mirror = var.base_configuration["mirror"] 18 | salt_obs_flavor = var.salt_obs_flavor 19 | } 20 | 21 | roles = ["salt_testenv"] 22 | } 23 | 24 | output "configuration" { 25 | value = module.salt_testenv.configuration 26 | } 27 | -------------------------------------------------------------------------------- /modules/salt_testenv/variables.tf: -------------------------------------------------------------------------------- 1 | variable "base_configuration" { 2 | description = "use module.base.configuration, see the main.tf example file" 3 | } 4 | 5 | variable "name" { 6 | description = "hostname, without the domain part" 7 | type = string 8 | } 9 | 10 | variable "use_os_released_updates" { 11 | description = "Apply all updates from SUSE Linux Enterprise repos" 12 | default = false 13 | } 14 | 15 | variable "salt_obs_flavor" { 16 | description = "One of: saltstack, saltstack:products, saltstack:products:testing or saltstack:products:next" 17 | type = string 18 | default = "saltstack:products:testing" 19 | } 20 | 21 | variable "additional_repos" { 22 | description = "extra repositories in the form {label = url}, see README_ADVANCED.md" 23 | default = {} 24 | } 25 | 26 | variable "additional_repos_only" { 27 | description = "whether to exclusively use additional repos" 28 | default = false 29 | } 30 | 31 | variable "additional_packages" { 32 | description = "extra packages to install, see README_ADVANCED.md" 33 | default = [] 34 | } 35 | 36 | variable "swap_file_size" { 37 | description = "Swap file size in MiB, or 0 for none" 38 | default = 0 39 | } 40 | 41 | variable "ssh_key_path" { 42 | description = "path of additional pub ssh key you want to use to access VMs, see README_ADVANCED.md" 43 | default = null 44 | } 45 | 46 | variable "provider_settings" { 47 | description = "Map of provider-specific settings, see the backend-specific README file" 48 | default = {} 49 | } 50 | 51 | variable "image" { 52 | description = "An image name, e.g.
sles12sp4 or opensuse155o" 53 | type = string 54 | default = "opensuse155o" 55 | } 56 | 57 | variable "install_salt_bundle" { 58 | description = "use true to install the venv-salt-minion package in the hosts" 59 | default = true 60 | } 61 | -------------------------------------------------------------------------------- /modules/salt_testenv/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/server/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/server_containerized/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/sshminion/main.tf: -------------------------------------------------------------------------------- 1 | module "sshminion" { 2 | source = "../host" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | quantity = var.quantity 7 | use_os_released_updates = var.use_os_released_updates 8 | install_salt_bundle = var.install_salt_bundle 9 | additional_repos = var.additional_repos 10 | additional_repos_only = var.additional_repos_only 11 | additional_packages = var.additional_packages 12 | gpg_keys = var.gpg_keys 13 | swap_file_size = var.swap_file_size 14 | ssh_key_path = var.ssh_key_path 15 | ipv6 = var.ipv6 16 | connect_to_base_network = true 17 | connect_to_additional_network = false 18 | roles = ["sshminion"] 19 | disable_firewall = var.disable_firewall 20 | product_version = var.product_version 21 | grains = { 22 | mirror = var.base_configuration["mirror"] 23 | sles_registration_code = var.sles_registration_code 24 | } 25 | 26 | 27 | image = var.image 28 | provider_settings = var.provider_settings 29 | } 30 | 31 | output "configuration" { 32 | value = module.sshminion.configuration 33 | } 34 | 35 | -------------------------------------------------------------------------------- /modules/sshminion/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /modules/virthost/main.tf: -------------------------------------------------------------------------------- 1 | module "virthost" { 2 | source = "../minion" 3 | 4 | base_configuration = var.base_configuration 5 | name = var.name 6 | product_version = var.product_version 7 | server_configuration = var.server_configuration 8 | activation_key = var.activation_key 9 | auto_connect_to_master = var.auto_connect_to_master 10 | use_os_released_updates = var.use_os_released_updates 11 | install_salt_bundle = var.install_salt_bundle 12 | additional_repos = var.additional_repos 13 | additional_repos_only = var.additional_repos_only 14 | additional_packages = var.additional_packages 15 | quantity = var.quantity 16 | gpg_keys = var.gpg_keys 17 | ssh_key_path = var.ssh_key_path 18 | ipv6 = var.ipv6 19 | roles = ["minion", "virthost"] 20 | additional_grains = merge({ 21 | hvm_disk_image = var.hvm_disk_image 22 | sles_registration_code = var.sles_registration_code 23 | },var.additional_grains) 24 | 25 | image = var.image 26 | provider_settings =var.provider_settings 27 | } 28 | 29 | 
output "configuration" { 30 | value = module.virthost.configuration 31 | } 32 | -------------------------------------------------------------------------------- /modules/virthost/versions.tf: -------------------------------------------------------------------------------- 1 | ../backend/host/versions.tf -------------------------------------------------------------------------------- /salt/build_host/certs/ca.cert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIGDDCCA/SgAwIBAgIJAKfUYNnZ1+8TMA0GCSqGSIb3DQEBCwUAMIGSMQswCQYD 3 | VQQGEwJERTEPMA0GA1UECAwGQmF5ZXJuMRIwEAYDVQQHDAlOdXJlbWJlcmcxDTAL 4 | BgNVBAoMBFNVU0UxFTATBgNVBAsMDFNVU0UgTWFuYWdlcjETMBEGA1UEAwwKTWFu 5 | YWdlciBDQTEjMCEGCSqGSIb3DQEJARYUZ2FsYXh5LWRldmVsQHN1c2UuZGUwHhcN 6 | MTYwNTA5MDg1NzAwWhcNMzYwNTA0MDg1NzAwWjCBkjELMAkGA1UEBhMCREUxDzAN 7 | BgNVBAgMBkJheWVybjESMBAGA1UEBwwJTnVyZW1iZXJnMQ0wCwYDVQQKDARTVVNF 8 | MRUwEwYDVQQLDAxTVVNFIE1hbmFnZXIxEzARBgNVBAMMCk1hbmFnZXIgQ0ExIzAh 9 | BgkqhkiG9w0BCQEWFGdhbGF4eS1kZXZlbEBzdXNlLmRlMIICIjANBgkqhkiG9w0B 10 | AQEFAAOCAg8AMIICCgKCAgEAwDv9eWqMWHav4yozUfZS52elPBJg9k3X/NNv/O4N 11 | sOoISEOLVryPUEYF7JNQsE+GeXRMWCpKft3wf43zrzm2lBMgpNQSQI0OVriKmp6I 12 | qGoBLTwI2A6gCNAmh8VNFrDeKUEpTT9ZZDT9Mo3qa1jSTnbNgDqCua4fDlJ83Gm/ 13 | WaLseT6QMK2gkfYZ5HJgzax0eNsM0qLmcZ5HwzJLE1jojwIJM1EOCqz+tNtagXYH 14 | MjI9c/NCHNBYGYShegf7ZZ2SAe1pZLr8lpF+vpmP7Z38bdFaeOyZimj8UUkGEwn9 15 | K3r3msNpcqTogzbgmKvjaTU0572cHoQEKA1cq1qQIeXN10+B9iW8HREnaDcbyQGN 16 | 2NiR0SKTDOp8hw25xDb9hPpztZaL73e2lttkMgSu057ubCTiKuRDlXAmxdbdJPXB 17 | LRdQ0qu72WNaNZ3e9cH4Lf6LSL51PREdr0iWtZDFq/XvTFzdyTxDpmZwLKg3TbNZ 18 | sbkveWOThB4ZzrbmXt1lD8NVCZglNjYOB3UA38ShYZoYuSDKoC90sFQR7DsF1iLi 19 | smPduDZ/dulGCIHQjKI+sEu8ckk7QEvxBVfUJ7xVC6scgTCrhVJ9FUQ+jeuTQ618 20 | XXlZV2Ol2dM5mK4dh+pvAYPSKq1IxrQYVWdc6X6NkFK1KO2wDWqgidSTU32/+I8m 21 | F+UCAwEAAaNjMGEwHQYDVR0OBBYEFH0s8wjITprBH+M6KonXjTrz8H3jMB8GA1Ud 22 | IwQYMBaAFH0s8wjITprBH+M6KonXjTrz8H3jMA8GA1UdEwEB/wQFMAMBAf8wDgYD 23 | VR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQA7vcA4BOhRBwTlIATnUkko 24 | 7j9IQl4sp1+MHXYJEtzfpLUiW9lP4oLT5unLIhTmPHyMW65TZVIrjt0KuABz7/u6 25 | EgCVHceN33BFAsVqf9SUuxxTuM8FTbbp3eM7GO37T5n2KzFgXjI0degZwERn2zX8 26 | /hFNhyAEWTXSGeyoWSTDR4sAQVxEPkjw8a7v2TVWYKvVLxjEPfjtLb+fLFCuiEPE 27 | Gxw8Oza9x4MNrAG5Un28KPzc2cHxabW11Mv2H/OZwyw2MKLktb/LXFNx/B7WU1qT 28 | Yg8Y8+jq/WdSSkll+lTGxk2lRMkt5rIGmCQlrj5yQfkRKWAFhU02QEczll2UdJW8 29 | 0VJKA/vCsXz297ZA3utBEK98jquFPry8KGa+BLrOuj8pQTuEuhT5cm7iJxKTxSoH 30 | dcTms/UIKjLle7ND7u3Scn/sfUz/suS8lzuvfjKKYu/ojxpFDtPER7N0YUX56PZW 31 | /XVwk1ljiKvwPn71nhbwr5xnIVzYA9glmMSimMQqH3M+cgQM/KfCfBLEesjqM8Xg 32 | GZSFizAvtHP77Lmz70wzRMPLz+blf7RtoOm9YiVDWFoLQ0SyQQ9c/83dzuEdqj+7 33 | 7v7Horw5JMn1RvGqNOCIR//zk3AkQgTi/AbuYE9LflN+7iuqO6k3beycRcwqca6w 34 | IOipUNqWJ/CanvPJl4OITg== 35 | -----END CERTIFICATE----- 36 | -------------------------------------------------------------------------------- /salt/build_host/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - scc.build_host 3 | 4 | certificate_authority_certificate: 5 | file.managed: 6 | - name: /etc/pki/trust/anchors/ca.cert.pem 7 | - source: salt://build_host/certs/ca.cert.pem 8 | - makedirs: True 9 | 10 | ssh_private_key: 11 | file.managed: 12 | - name: /root/.ssh/id_ed25519 13 | - source: salt://build_host/keys/id_ed25519 14 | - makedirs: True 15 | - user: root 16 | - group: root 17 | - mode: 600 18 | 19 | ssh_public_key: 20 | file.managed: 21 | - name: /root/.ssh/id_ed25519.pub 22 | - source: 
salt://build_host/keys/id_ed25519.pub 23 | - makedirs: True 24 | - user: root 25 | - group: root 26 | - mode: 600 27 | 28 | {% if '11' in grains['osrelease'] %} 29 | 30 | update_ca_truststore_registry_build_host: 31 | cmd.run: 32 | - name: cp /etc/pki/trust/anchors/ca.cert.pem /etc/ssl/certs/ && /usr/bin/c_rehash /etc/ssl/certs 33 | - onchanges: 34 | - file: certificate_authority_certificate 35 | 36 | {% elif '12' in grains['osrelease'] %} 37 | 38 | update_ca_truststore_registry_build_host: 39 | cmd.run: 40 | - name: /usr/sbin/update-ca-certificates 41 | - onchanges: 42 | - file: certificate_authority_certificate 43 | 44 | {% elif '15' in grains['osrelease'] %} 45 | 46 | {# Do not run update-ca-certificates on SLE 15 because there is #} 47 | {# already a systemd unit that watches for changes and runs it: #} 48 | {# /usr/lib/systemd/system/ca-certificates.path #} 49 | 50 | {% if "opensuse" not in grains['oscodename']|lower %} 51 | 52 | cloud_flavor_check: 53 | pkg.installed: 54 | - name: python-instance-billing-flavor-check 55 | 56 | {% endif %} 57 | {% endif %} 58 | -------------------------------------------------------------------------------- /salt/build_host/keys/id_ed25519: -------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW 3 | QyNTUxOQAAACDu94k1jgqMKGdEz6auq26GhRWOL/VeatflehsdmfbsqAAAAJjw8SGP8PEh 4 | jwAAAAtzc2gtZWQyNTUxOQAAACDu94k1jgqMKGdEz6auq26GhRWOL/VeatflehsdmfbsqA 5 | AAAEBoK9M7WXJDJgujoo93gNqn8oIJi2SnKmCUvMalpM8i9u73iTWOCowoZ0TPpq6rboaF 6 | FY4v9V5q1+V6Gx2Z9uyoAAAAD3Jvb3RAYnVpbGRfaG9zdAECAwQFBg== 7 | -----END OPENSSH PRIVATE KEY----- 8 | -------------------------------------------------------------------------------- /salt/build_host/keys/id_ed25519.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO73iTWOCowoZ0TPpq6rboaFFY4v9V5q1+V6Gx2Z9uyo root@build_host 2 | -------------------------------------------------------------------------------- /salt/client/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - scc.client 3 | - client.testsuite 4 | 5 | wget: 6 | pkg.installed: 7 | - require: 8 | - sls: default 9 | 10 | {% if grains.get('auto_register') %} 11 | 12 | base_bootstrap_script: 13 | file.managed: 14 | - name: /root/bootstrap.sh 15 | - source: http://{{grains['server']}}/pub/bootstrap/bootstrap.sh 16 | - source_hash: http://{{grains['server']}}/pub/bootstrap/bootstrap.sh.sha512 17 | - mode: 755 18 | 19 | bootstrap_script: 20 | file.replace: 21 | - name: /root/bootstrap.sh 22 | - pattern: ^PROFILENAME="".*$ 23 | {% if grains['hostname'] and grains['domain'] %} 24 | - repl: PROFILENAME="{{ grains['hostname'] }}.{{ grains['domain'] }}" 25 | {% else %} 26 | - repl: PROFILENAME="{{grains['fqdn']}}" 27 | {% endif %} 28 | - require: 29 | - file: base_bootstrap_script 30 | cmd.run: 31 | - name: /root/bootstrap.sh 32 | - require: 33 | - file: bootstrap_script 34 | - pkg: wget 35 | 36 | {% endif %} 37 | -------------------------------------------------------------------------------- /salt/client/testsuite.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('testsuite') | default(false, true) %} 2 | 3 | include: 4 | - repos 5 | - client 6 | 7 | client_cucumber_requisites: 8 | pkg.installed: 9 | - pkgs: 10 | - spacewalk-client-setup 11 | - spacewalk-check 12 
| - mgr-cfg-actions 13 | # Debian based systems don't come with curl installed, the test suite handles it with wget instead 14 | {% if grains['os_family'] == 'Debian' %} 15 | - wget 16 | {% endif %} 17 | - require: 18 | - sls: default 19 | 20 | {% if grains['os'] == 'SUSE' and '12' in grains['osrelease'] %} 21 | 22 | suse_client_cucumber_requisites: 23 | pkg.installed: 24 | - pkgs: 25 | - aaa_base-extras 26 | - require: 27 | - sls: repos 28 | 29 | {% endif %} 30 | 31 | {% endif %} 32 | -------------------------------------------------------------------------------- /salt/controller/http_testsuite.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Testsuite HTTP Service 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | User=root 8 | WorkingDirectory=/root/spacewalk/testsuite 9 | ExecStart=/usr/bin/python3 -m http.server 80 10 | Restart=on-abort 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /salt/controller/id_ed25519: -------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW 3 | QyNTUxOQAAACBhDwPFL2f+rU4i/jChlALBybj5Q6bnng5m7iftIIjgmAAAAJAqWvyhKlr8 4 | oQAAAAtzc2gtZWQyNTUxOQAAACBhDwPFL2f+rU4i/jChlALBybj5Q6bnng5m7iftIIjgmA 5 | AAAEAbj/tZzeu2JuFmYIwVkjOvK+mdUFlnsGK0AluVz9V3A2EPA8UvZ/6tTiL+MKGUAsHJ 6 | uPlDpueeDmbuJ+0giOCYAAAAC3Bhb2xvQGxpbnV4AQI= 7 | -----END OPENSSH PRIVATE KEY----- 8 | -------------------------------------------------------------------------------- /salt/controller/id_ed25519.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGEPA8UvZ/6tTiL+MKGUAsHJuPlDpueeDmbuJ+0giOCY root@controller 2 | -------------------------------------------------------------------------------- /salt/controller/run-testsuite: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | cd /root/spacewalk/testsuite 4 | 5 | ABORT_ON_ERROR=1 6 | if [ "$1" = "-d" -o "$1" = "--dont-fail" ]; then 7 | ABORT_ON_ERROR=0 8 | shift 9 | fi 10 | RESULT=0 11 | 12 | stage() { 13 | rake "cucumber:$1" 14 | if [ $? -ne 0 ]; then 15 | [ $ABORT_ON_ERROR -ne 0 ] && exit -1 16 | RESULT=1 17 | fi 18 | } 19 | 20 | parallel_stage() { 21 | rake "parallel:$1" 22 | if [ $? 
-ne 0 ]; then 23 | [ $ABORT_ON_ERROR -ne 0 ] && exit -1 24 | RESULT=1 25 | fi 26 | } 27 | 28 | # normal run 29 | if [ -z "$1" ]; then 30 | stage sanity_check 31 | stage core 32 | stage reposync 33 | stage proxy 34 | stage init_clients 35 | stage secondary 36 | stage secondary_parallelizable 37 | stage finishing 38 | fi 39 | 40 | # parallel run 41 | if [ "$1" = "parallel" ]; then 42 | parallel_stage sanity_check 43 | stage core 44 | stage reposync 45 | stage proxy 46 | parallel_stage init_clients 47 | stage secondary 48 | parallel_stage secondary_parallelizable 49 | stage finishing 50 | fi 51 | 52 | # essential features 53 | if [ "$1" = "essential" ]; then 54 | stage sanity_check 55 | stage core 56 | stage reposync 57 | stage proxy 58 | stage init_clients 59 | fi 60 | 61 | # prepare reference host 62 | if [ "$1" = "refhost" ]; then 63 | stage refhost 64 | fi 65 | 66 | # virtualization tests 67 | if [ "$1" = "virtualization" ]; then 68 | stage virtualization 69 | fi 70 | 71 | # SLE updates tests 72 | if [ "$1" = "sle-updates" ]; then 73 | stage sle-updates 74 | fi 75 | 76 | # QAM 77 | if [ "$1" = "qam" ]; then 78 | stage qam_add_custom_repositories 79 | stage qam_add_activation_keys 80 | stage qam_init_proxy 81 | stage qam_init_clients 82 | stage qam_smoke_tests 83 | stage qam_finishing 84 | fi 85 | 86 | if [ "$1" = "qam-parallel" ]; then 87 | parallel_stage qam_add_custom_repositories 88 | parallel_stage qam_add_activation_keys 89 | stage qam_init_proxy 90 | parallel_stage qam_init_clients 91 | parallel_stage qam_smoke_tests 92 | stage qam_finishing 93 | fi 94 | 95 | exit $RESULT 96 | -------------------------------------------------------------------------------- /salt/controller/virtualhostmanager.create.json: -------------------------------------------------------------------------------- 1 | { 2 | "hostname" : "10.162.186.115", 3 | "port" : "443", 4 | "username" : "root", 5 | "password" : "password" 6 | } 7 | -------------------------------------------------------------------------------- /salt/default/chrony.conf: -------------------------------------------------------------------------------- 1 | pool 0.pool.ntp.org iburst 2 | 3 | driftfile /var/lib/chrony/drift 4 | 5 | makestep 1.0 3 6 | 7 | rtcsync 8 | 9 | logdir /var/log/chrony 10 | -------------------------------------------------------------------------------- /salt/default/firewall.sls: -------------------------------------------------------------------------------- 1 | 2 | {% if grains.get('disable_firewall') | default(true, true) %} 3 | 4 | disable_firewall: 5 | service.dead: 6 | {% if grains.get('osmajorrelease', None)|int() == 15 %} 7 | - name: firewalld 8 | {% else %} 9 | - name: SuSEfirewall2 10 | {% endif %} 11 | - enable: False 12 | {% endif %} 13 | -------------------------------------------------------------------------------- /salt/default/gpg_keys/galaxy.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v2.0.15 (GNU/Linux) 3 | 4 | mQENBGLwyaYBCADNYZW2qfvrxVYK3A9ArHdCP2GuHo0tdZzvaVor3hEGZ1A6lwLR 5 | S+r+l+o1pDywjZDSSSpdrLD3DT7T//OBZT9e0s9cluqy18FoGuXymvlL7MoeENo6 6 | Q1nD379T0UBAnfF3TEIoTuFCWzQX2z668AK/bBQNLzAU2KylusGQlLjpraOlz+eO 7 | LHXCc2EXZMiknbHuhku48j6YrvDCARdeQUFl+bHMEtZi2FXTEpFacy/oUbXIr8YG 8 | /i6pBa9oDQr9R7lgU8rZtb4YfeX9jr3+1SehNogxwmh/QDpC13lqGqJ7rBNhW6p4 9 | vT7rPlUgASV4ab+O4LDqexg4U96YV8DvgZIJABEBAAG0NURldmVsOkdhbGF4eSBP 10 | QlMgUHJvamVjdCA8RGV2ZWw6R2FsYXh5QGJ1aWxkLnN1c2UuZGU+iQE+BBMBAgAo 11 | 
BQJi8MmmAhsDBQkEHrAABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRDkXcGY 12 | beVjdt1DB/9V0sqsPV7xpeYB1aCQfXqiuX/L2d7ZWXrQJiMkH/DHSP+x96nFlMtf 13 | Z40nDui1a5uJJ21f8qUWXHSMwCn4xED0eaSzrZhkcrYuTIHQMQFFD8/gR7vJWVuA 14 | aKhpZEm0D+3GQnOzlpSW4t4nZfG4AFyB4hO2M8DQjiLV2XKsobnuodI5yXkfNSnw 15 | w3HwIqjMvSY0y8zKrJDKge1o+rNsm2XWKv0kjGxoCTnKnJEubJpUXJ/BATbnidho 16 | iy9Vukb+aDWQOwxiumjj1o/A9oExRkOfnQ/2e5Du4jdtApjdUHb0rzOHx213bZ5J 17 | +wh6+wW+NYLS6a2aDbxv+3SYNf/YdpLpiEYEExECAAYFAmLwyaYACgkQqE7a6JyA 18 | Cso07QCePkklGUUjNG4p7x+TVPIANEs60dUAn20/EMPdf0BQNGW1/qdCb5MT2AIQ 19 | =TZhb 20 | -----END PGP PUBLIC KEY BLOCK----- 21 | -------------------------------------------------------------------------------- /salt/default/gpg_keys/saltstack.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.5 (GNU/Linux) 3 | 4 | mQENBFspGl8BCADC0Lmh3GGIaWI4FvekwxbCqsRlqWQhdtSgQiFXlzjKVPl5kPSt 5 | oYvvIVV5M33SaHK1s2/i2dbiL6vmw+NazQueOcHw//uAikugssmvwK1iYhbwl/TW 6 | EZDVfW0pW33XR9IHzhbONV2293pjwScemmcEYghrN7TDw6cwcMs14ju0Jbh03+jN 7 | lW+Ryrazeb7O9lkdeaA/Fq4c3miw156uWqdgKr7kPnp0GjMWRSBVIeUK936PJyNV 8 | ibcwsvP1K2sqF9TtbDzRMdqw5GOdHIUd0Ml0Tb8Sg+kSxdhkhgBUslZW77FrEWMS 9 | HJD1rw77VUL+v7BzKBKl7mLXlfaNpmfkXWJTABEBAAG0RHN5c3RlbXNtYW5hZ2Vt 10 | ZW50IE9CUyBQcm9qZWN0IDxzeXN0ZW1zbWFuYWdlbWVudEBidWlsZC5vcGVuc3Vz 11 | ZS5vcmc+iQE+BBMBCAAoBQJlcECTAhsDBQkOZdY0BgsJCAcDAgYVCAIJCgsEFgID 12 | AQIeAQIXgAAKCRCsqpz35uWiE9NYCAC4BiV/CV/9QzWbwUup/pmLOD84JPo72C15 13 | Z+FARqk2LS5JhfxZRFaG13S2/+Ua3gN4RG0WqOxRaA6oKPs2V81X3I3ysq9660MW 14 | 4qPbhGVZaBzPLat+9Q6MaRaqu9G3sEePGDWR1sR3s/d3pJl0kRK2fyQILnUx65zb 15 | r77ErCox/kwAB5bu5Nv9d4YuwS7oY+b1Nw1vvmmBu8z8acd6iNX5inIX+Oy06WTE 16 | lO0gh54GjVWIK1WlJYcV73GIu2jUBQJhP3GAaf9kp2Mtv/W9+D2XxGaFslZShiMp 17 | /FBcmYQeJAfh8WmxdWFZRlvbpqNbC6Gxmb/+euIZz43U6B1qQhRgiEYEExECAAYF 18 | AlspGl8ACgkQOzARt2udZSOfAACdEKhSoziJSqKEvrsGEBq0QNc4CwcAniYQosXY 19 | +OdtQPCAY1lf4oqKOLpD 20 | =/8kX 21 | -----END PGP PUBLIC KEY BLOCK----- 22 | -------------------------------------------------------------------------------- /salt/default/gpg_keys/suse_el9.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v2.0.15 (GNU/Linux) 3 | 4 | mQINBGMq/2cBEACpQwuUAT7BxUqq2EQDL7Kp0uu2KJNCL4lBegvy7OnIjKTCCh0x 5 | wLfbKX41Cl+xZzvNYc7z39cDoE3MJMxyNBWR/R+h2CaY4S2jegpLWF3jZ+QwApYt 6 | QonJWCgaQmyBfkeqQNfKcep8cqK9dcmtubCehtjnU84+S0qH3DB6zIqCFHrYNdsV 7 | fuDNwIqa+0+6OzHvq/Y2hD7e8OomFForwYfFnJp//3aY1ENj35jpo2lJp4qaDqVt 8 | Cw+sfE6CtjOqloIQ+MOClRc7CTXY0nTg2gqyHA/ifsv9KqxoK9rvew9SKPnM5AF3 9 | Ffz993u+aLIkjg4HR255AVGJxMAO52JhhGNQayjDs4jiyT7OfKUePPHt7xEMHfba 10 | VMZNmXQ0Io2x2Mj0+25tfY1OYNcGgsK6ySsQnOjVa1Ssj/sPGDH3gHK01QTx2KQo 11 | WSkvTflP6Yj5WTHgKmpIAnubuYntyjftZsYscHJWZYsf050VlyA6WLhdsJ/FBG68 12 | LKSCiI+puktGfLovDRn8OCXs2AG7Oikow4E/WyHvt3lGKTeJ2h1hPb6JuqcWjBvk 13 | /R5zfjazGgLWFkCWEu8+DZpPycgxEARDHsnMk9gKS0xNQqWo14ABLERI9OSoyxuG 14 | BBNDwqhlqzQ47ZybIfUhVme95sLpU/ikEAJ8f6CoFMtPmIQtbmHF/t4GZwARAQAB 15 | tChTVVNFIHByb2R1Y3QgYWRkb24gPGJ1aWxkLWFkZG9uQHN1c2UuZGU+iQI+BBMB 16 | AgAoBQJjKv9nAhsDBQkHhM4ABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCX 17 | pjbbC62OzEIOD/964d1cpanbTlnl+vq/OT+9TVmRsGp5odUoaBt05w9bEse2Pejo 18 | J+QXduENnHkGX3sQrBT2S/x8gWWE8u3bmtAiyM2ScCTvIQ+Q0hD4TszpJjyxU3y9 19 | 1Blkkuljp169zXHWYHG8yWXIMh5Z6p4A9Ofda6KtY5GJnvMKJ39r4m9Pu0kDLFX+ 20 | KKW6SEgbKWLU81SGIFujMJVOhO3SpBtH07oWhoJMcOnTOkaDX5m7JmkIoWp7CJDV 21 | 
kEpE3b6zN6cu6hlE8vb0NcNOLRYttpSXTAHSz7BsWSzToSYR2mY5nttx5+2SjzZ+ 22 | eOgxDk0fwL4IgfCL3AsnPOH9H/HvbJGBcNUM7svrmyArCDPcyibrxOg2I0/AlT+O 23 | 6PkeLXvewO9u6HYOmvEHZUB+hoeQJ07IG8GWgazb9i5JT5HQLNAHUiZ42jSphu5n 24 | OVUVqI/K5rbU/cnPQoBj+ywgH8UxtY9IuHezoQEQK1bZ9ZTXB53AbQAJd4vPVIF8 25 | UViTaa6bI1NdieGzVuY6gaL6pLrjlbQla1/1ZRkeES6YCNxWwSm39vBDdADXe8+x 26 | dq1QS4YYS8ZcCtGwbNnPf7YJHa2Y+jzN2to0qYGdCpXJLZqB+/m+ivcnaKatQjAt 27 | PbklS1COEqF1bpD7ZxDEs7II4RsZW1KmWnIOZUc/wfCPGF+WIE4umvkbQg== 28 | =UtQK 29 | -----END PGP PUBLIC KEY BLOCK----- 30 | -------------------------------------------------------------------------------- /salt/default/gpg_keys/suse_res6.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v2.0.15 (GNU/Linux) 3 | 4 | mIsERCAdXQEEAL7MrBTz+3SBWpCm2ae2yaDqV3ezQcs2JlvqidJVhsZqQe9/jkxi 5 | KTEQW5+TXF/+BlQSiebunRI7oo3+9U8GyRCgs1sf+yRQWMLzZqRaarzRhw9w+Ihl 6 | edtqYl6/U2JZCb8Adp6d7RzlRliJdJ/VtsfXj2ef7Dwu7elOVSsmaBdtAAYptChT 7 | dVNFIFBhY2thZ2UgU2lnbmluZyBLZXkgPGJ1aWxkQHN1c2UuZGU+iLgEEwECACIC 8 | GwMECwcDAgMVAgMDFgIBAh4BAheABQJaqpClBQkeD0FIAAoJEOOlw2Awfj1UeSEE 9 | AItAomled1lY+qcJXOKjNA6NKFBwbnRC6IZ8jMIBmq6MO9KK4lkbEiFdRB98klJ0 10 | kofFjO0DryFyfvHEBYPwko2HPpVHp3QKMjwhvayUIAaCZg8eRq/7nE2KNlkHBHmg 11 | raADZbBA/ktXY3qt1yTePb8Sw29/mN3/hrfEdjCs6Cgy 12 | =blUq 13 | -----END PGP PUBLIC KEY BLOCK----- 14 | -------------------------------------------------------------------------------- /salt/default/gpg_keys/suse_res7.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v2.0.15 (GNU/Linux) 3 | 4 | mQENBFEKlmsBCADbpZZbbSC5Zi+HxCR/ynYsVxU5JNNiSSZabN5GMgc9Z0hxeXxp 5 | YWvFoE/4n0+IXIsp83iKvxf06Eu8je/DXp0lMqDZu7WiT3XXAlkOPSNV4akHTDoY 6 | 91SJaZCpgUJ7K1QXOPABNbREsAMN1a7rxBowjNjBUyiTJ2YuvQRLtGdK1kExsVma 7 | hieh/QxpoDyYd5w/aky3z23erCoEd+OPfAqEHd5tQIa6LOosa63BSCEl3milJ7J9 8 | vDmoGPAoS6ui7S2R5X4/+PLN8Mm2kOBrFjhmL93LX0mrGCMxsNsKgP6zabYKQEb8 9 | L028SXvl7EGoA+Vw5Vd3wIGbM73PfbgNrXjfABEBAAG0KFN1U0UgUGFja2FnZSBT 10 | aWduaW5nIEtleSA8YnVpbGRAc3VzZS5kZT6JATwEEwECACYCGwMGCwkIBwMCBBUC 11 | CAMEFgIDAQIeAQIXgAUCWEfrHwUJDsIitAAKCRBwr56BOdt8gpqUB/wPSSS5BcDu 12 | Oi4n02cj4Hdt7WITKBjjo0lG1fXG1ppx1wOST+s8FertMVFY53TW6FGjcYtwVOIq 13 | rsMYiV6kf1NxUV/jcAy7VmC5EZnO0R/D3sT4Oh5hsLtERauZolK5BZmd0S51Qa8e 14 | TxZ5mX9PL2i3s/ShETc30drf83ugc7B4yZPNQWXNDPgGcC+hEeC5qw48RzHYIpUt 15 | RzHmefR5Z3ioTUbDlzy+SGP2uA7mhR4Lfk/df5fYxWfCoKlyGjtrvA65cB+Pksyn 16 | xrAeBuB+vBM+KnDrxW2Sn4AbWkzH//dfz9OJDJu4UM91hb7qxM0OkrXHQV3iNqzg 17 | MDEhky/9NqMy 18 | =GdP5 19 | -----END PGP PUBLIC KEY BLOCK----- 20 | -------------------------------------------------------------------------------- /salt/default/gpg_keys/suse_staging.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v2.0.15 (GNU/Linux) 3 | 4 | mQENBFErWUoBCADTYW0S3mAHLCLLFuaRWxsK6NwGDmW6vSSkRt8OTXSAJyz2yFUS 5 | oQ7dmw5ce6uyChoMBap0+2xiGyBzMZ4nP3ADD6voG5FXxg6QTfsDyz/sg5BRbS2W 6 | q2e3XU4qEiJn357mXd53YfXKYpA7d2Ct5LKtKGph0DeGsi2zDKnjUCkXrOGNI+VH 7 | qUCardfjAxgsA+m2bN/GKBOdHiHEHlQ0+GyLaoDi8utQz+L42iEQK4uvUQDKa+6k 8 | fbocsR0z55meVCf5ZuEg/katzhtrtesG/sqTLCBMLsLGSIllHSZRxQPbBNf8l7wT 9 | CPqg9JqXHT17aO3O+ZTkqv1dTIwXtmyPkIAjABEBAAG0JVVuc3VwcG9ydGVkIDx1 10 | bnN1cHBvcnRlZC1yc2FAc3VzZS5kZT6JATwEEwECACYCGwMGCwkIBwMCBBUCCAME 11 | FgIDAQIeAQIXgAUCY+utDQUJGkUhwwAKCRCcdTFJzjtnLkZzCACJV8KKLtRtlw5a 12 | 
MZJdFz1TKFzpAYd2Wcdtl7R/gYa+z4A91UbqtFHdK91EKXAvEkf4f88wj2Dvqj/1 13 | omBhO8si+p3Hxm1UVD91nTBNVWyjn33uai5YP2VEKQOzgBV2swVWuzSsJq0Kc1Yk 14 | zKCPdtFpENdHfRKIXVgaDmUy0EAQJhBLx4SGD9l3DnG5Lt4d+jXe49BgCaGn+5eB 15 | zl23nwL02wSD6uTShsqtdGQCaZtl2WjIb81H6rTRLcSPhyClpy/s3GV9wFri0zLL 16 | Q6eiIbNj8uAvCDdGACEpmXic0lgIQdKsnJLelqJtwCpAmi3ma4cV1kCLD9z2Qtxl 17 | ePb5BMBz 18 | =xYx6 19 | -----END PGP PUBLIC KEY BLOCK----- 20 | -------------------------------------------------------------------------------- /salt/default/gpg_keys/uyuni.key: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.5 (GNU/Linux) 3 | 4 | mQENBFsnulUBCADNjL4hvhVtSzqVDlMtFFFP28Acq+UNF8WKKMhbBirfOpXwwI1C 5 | NR3i0CXPOce5eKShuuWAjD2E36e2XAp3rUAo/aCA7UgtJkMNKzzlTOcqHHxKTx6H 6 | gvp0Fb6xTKywZ7VttGhwUynl+CsDuOst3ROXTNdb8XMfm4joH2FW5D3ACN2qNiv0 7 | MVcFNKxQ98w8M9xJxdI8DuyngnSeZwAosNzEio3JhTPiTv9ngY2Z3AuYUcwTEt7o 8 | feEN+ivAgYnn+a6DBKFBeCW7VUD3V+tH8/fKnkvI4gf2o3N7Ok+/uE+DPUBb+14f 9 | +9dhBjd+7+pR3ayEZFjQns5XFShoYu2+CQspABEBAAG0UHN5c3RlbXNtYW5hZ2Vt 10 | ZW50OlV5dW5pIE9CUyBQcm9qZWN0IDxzeXN0ZW1zbWFuYWdlbWVudDpVeXVuaUBi 11 | dWlsZC5vcGVuc3VzZS5vcmc+iQE+BBMBCAAoBQJjQDEEAhsDBQkMNyavBgsJCAcD 12 | AgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCXLl1sDSCDPjsSCAC1v9YHwuP0kRt8VPlq 13 | /RLgADb5TsUPOaDcZ/maKVxhL5EgY2mX1ViCO4Bm+VFL2ZSJEXth8/Zp/dZe80e9 14 | tlZgag5uPQe9FV0IAHXYt91DYJlE7VuxvdhADIt9RcDmS4OrSAfQoroyh5OW3ZRW 15 | Kqa68L6RBhiyuvBTaRCUdIhqDBjVCgMlLJxC5soOIVCEvMRzOxHqO0+gvKomvM1P 16 | iK4cio2OcIqZb8vCyMIXtYniHqA0rUZD4U+EB9enmYcj9ZhWO9oQXZ0qCQN6ve/K 17 | 1Q7NjImT5oEHWGFeLmwWZMe2+djFcHiCQM1bFN1gC+2ASz5XPC7OKdrIi+E85gMo 18 | cYu+iEYEExECAAYFAlsnulUACgkQOzARt2udZSO/4QCcDf+j/XRbJn2PudsSoyjw 19 | 3B2boakAnA9A9b8UoEYgmLTRpwXYuhsxOCDE 20 | =8MsV 21 | -----END PGP PUBLIC KEY BLOCK----- 22 | -------------------------------------------------------------------------------- /salt/default/ids.sls: -------------------------------------------------------------------------------- 1 | {% if grains['os_family'] == 'RedHat' %} 2 | {% if grains['osfinger'] == 'Red Hat Enterprise Linux-9' or grains['osfinger'] == 'Amazon Linux-2023' %} 3 | install_dbus_uuidgen: 4 | pkg.installed: 5 | - pkgs: 6 | - dbus-tools 7 | {% endif %} 8 | {% endif %} 9 | 10 | systemd_machine_id: 11 | cmd.run: 12 | - name: | 13 | bash -c ' 14 | rm -f /etc/machine-id 15 | rm -f /var/lib/dbus/machine-id 16 | mkdir -p /var/lib/dbus 17 | dbus-uuidgen --ensure 18 | systemd-machine-id-setup 19 | new_id="$(cat /etc/machine-id)" 20 | mkdir -p "/var/log/journal/$new_id" 21 | for d in /var/log/journal/*; do 22 | [ "$(basename "$d")" != "$new_id" ] || continue 23 | [ -d "$d" ] || continue 24 | mv "$d"/* "$d"/.* "/var/log/journal/$new_id/" 2>/dev/null || true 25 | done 26 | touch /etc/machine-id-already-setup 27 | ' 28 | - creates: /etc/machine-id-already-setup 29 | - onlyif: test -f /usr/bin/systemd-machine-id-setup -o -f /bin/systemd-machine-id-setup 30 | 31 | dbus_machine_id: 32 | cmd.run: 33 | - name: rm -f /var/lib/dbus/machine-id && dbus-uuidgen --ensure 34 | - creates: /var/lib/dbus/machine-id 35 | - unless: test -f /usr/bin/systemd-machine-id-setup -o -f /bin/systemd-machine-id-setup 36 | 37 | minion_id_cleared: 38 | file.absent: 39 | {% if grains['install_salt_bundle'] %} 40 | - name: /etc/venv-salt-minion/minion_id 41 | {% else %} 42 | - name: /etc/salt/minion_id 43 | {% endif %} 44 | -------------------------------------------------------------------------------- /salt/default/init.sls: 
-------------------------------------------------------------------------------- 1 | include: 2 | - default.locale 3 | - default.update 4 | - default.minimal 5 | - default.pkgs 6 | - default.sshd 7 | {% if grains.get('reset_ids') | default(false, true) %} 8 | - default.ids 9 | {% endif %} 10 | {% if not grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 11 | # Dependencies already satisfied by the images 12 | # https://build.opensuse.org/project/show/systemsmanagement:sumaform:images:microos 13 | - default.testsuite 14 | {% endif %} 15 | 16 | 17 | {% if grains.get('swap_file_size', "0")|int() > 0 %} 18 | file_swap: 19 | cmd.run: 20 | - name: | 21 | {% if grains['os_family'] == 'RedHat' %}dd if=/dev/zero of=/extra_swapfile bs=1048576 count={{grains['swap_file_size']}}{% else %}fallocate --length {{grains['swap_file_size']}}MiB /extra_swapfile{% endif %} 22 | chmod 0600 /extra_swapfile 23 | mkswap /extra_swapfile 24 | - creates: /extra_swapfile 25 | mount.swap: 26 | - name: /extra_swapfile 27 | - persist: true 28 | - require: 29 | - cmd: file_swap 30 | {% endif %} 31 | 32 | {% if grains['authorized_keys'] %} 33 | authorized_keys: 34 | file.append: 35 | - name: /root/.ssh/authorized_keys 36 | - text: 37 | {% for key in grains['authorized_keys'] %} 38 | - {{ key }} 39 | {% endfor %} 40 | - makedirs: True 41 | {% endif %} 42 | -------------------------------------------------------------------------------- /salt/default/locale.sls: -------------------------------------------------------------------------------- 1 | {% if grains['os_family'] == 'Suse' %} 2 | manually_set_locale_rc_lang: 3 | file.replace: 4 | - name: /etc/sysconfig/language 5 | - pattern: ^RC_LANG=".*" 6 | - repl: RC_LANG="en_US.UTF-8" 7 | - onlyif: test ! -f /usr/bin/localectl 8 | 9 | manually_set_locale_root_uses_lang: 10 | file.replace: 11 | - name: /etc/sysconfig/language 12 | - pattern: ^ROOT_USES_LANG=".*" 13 | - repl: ROOT_USES_LANG="ctype" 14 | - onlyif: test ! -f /usr/bin/localectl 15 | 16 | manually_set_locale_installed_languages: 17 | file.replace: 18 | - name: /etc/sysconfig/language 19 | - pattern: ^INSTALLED_LANGUAGES=".*" 20 | - repl: INSTALLED_LANGUAGES="" 21 | - onlyif: test ! 
-f /usr/bin/localectl 22 | 23 | {% elif grains['os_family'] == 'RedHat' %} 24 | 25 | {% if grains.get('osmajorrelease', None)|int() == 9 %} 26 | langpack_package: 27 | pkg.installed: 28 | - name: glibc-langpack-en 29 | {% endif %} 30 | 31 | {% endif %} 32 | 33 | fix_en_US_UTF8_as_system_locale_with_localectl: 34 | cmd.run: 35 | - name: localectl set-locale LANG=en_US.UTF-8 36 | - onlyif: test -f /usr/bin/localectl 37 | -------------------------------------------------------------------------------- /salt/default/minimal.sls: -------------------------------------------------------------------------------- 1 | include: 2 | {% if grains['hostname'] and grains['domain'] %} 3 | - default.hostname 4 | {% endif %} 5 | - default.network 6 | - default.firewall 7 | {% if 'build_image' not in grains.get('product_version', '') and 'paygo' not in grains.get('product_version', '') %} 8 | - repos 9 | {% else %} 10 | - repos.testsuite 11 | {% endif %} 12 | - default.avahi 13 | - default.time 14 | 15 | minimal_package_update: 16 | {% if grains['os_family'] == 'Suse' and grains['osfullname'] in ['SLE Micro', 'SL-Micro', 'openSUSE Leap Micro'] %} 17 | cmd.run: 18 | {% if grains['install_salt_bundle'] %} 19 | - name: transactional-update -n -c package up zypper libzypp venv-salt-minion 20 | {% else %} 21 | - name: transactional-update -n -c package up zypper libzypp salt-minion 22 | {% endif %} 23 | {% else %} 24 | pkg.latest: 25 | - pkgs: 26 | {% if grains['install_salt_bundle'] %} 27 | - venv-salt-minion 28 | {% else %} 29 | - salt-minion 30 | {% endif %} 31 | # WORKAROUND: don't update zypper and libzypp for opensuse leap 15.5 because the last zypper version is broken 32 | {% if grains['os_family'] == 'Suse' and grains['oscodename'] != 'openSUSE Leap 15.5' %} 33 | - zypper 34 | - libzypp 35 | # WORKAROUND: avoid a segfault on old versions 36 | {% if '12' in grains['osrelease'] %} 37 | - libgio-2_0-0 38 | {% endif %} 39 | {% endif %} 40 | - order: last 41 | {% endif %} 42 | -------------------------------------------------------------------------------- /salt/default/ntp.conf: -------------------------------------------------------------------------------- 1 | restrict 127.0.0.1 2 | restrict ::1 3 | 4 | driftfile /var/lib/ntp/drift/ntp.drift 5 | logfile /var/log/ntp 6 | keys /etc/ntp.keys 7 | 8 | trustedkey 1 9 | requestkey 1 10 | controlkey 1 11 | 12 | pool 0.pool.ntp.org iburst 13 | -------------------------------------------------------------------------------- /salt/default/pkgs.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - scc 3 | 4 | {% if grains['additional_packages'] %} 5 | install_additional_packages: 6 | {% if grains['os_family'] == 'Suse' and grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 7 | cmd.run: 8 | {% for package in grains['additional_packages'] %} 9 | - name: transactional-update -c -n pkg in {{ package }} 10 | {% endfor %} 11 | {% else %} 12 | pkg.latest: 13 | - pkgs: 14 | {% for package in grains['additional_packages'] %} 15 | - {{ package }} 16 | {% endfor %} 17 | {% endif %} 18 | {% if 'paygo' not in grains.get('product_version', '') %} 19 | - require: 20 | - sls: repos 21 | {% if grains.get('server_registration_code') or grains.get('proxy_registration_code') or grains.get('sles_registration_code') %} 22 | - sls: scc 23 | {% endif %} 24 | {% endif %} 25 | {% endif %} 26 | -------------------------------------------------------------------------------- /salt/default/set_ip_in_etc_hosts.py: 
-------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import errno 4 | import os 5 | import re 6 | import socket 7 | import subprocess 8 | import sys 9 | import optparse 10 | 11 | parser = optparse.OptionParser() 12 | parser.add_option('--no-ipv6', action="store_false", dest="ipv6_is_enabled", default=True, help="do not set up IPv6 address") 13 | options, args = parser.parse_args() 14 | 15 | if len(args) != 2: 16 | print("Usage: set_ip_in_etc_hosts.py [--no-ipv6] <hostname> <domain>") 17 | sys.exit(1) 18 | 19 | hostname, domain = args 20 | fqdn = hostname + "." + domain 21 | 22 | def guess_address(fqdn, hostname, socket_type, invalid_prefixes, default): 23 | infos = [] 24 | try: 25 | infos += socket.getaddrinfo(fqdn, None, socket_type) 26 | except socket.error: 27 | pass 28 | try: 29 | infos += socket.getaddrinfo(hostname, None, socket_type) 30 | except socket.error: 31 | pass 32 | addresses = [info[4][0] for info in infos] 33 | valid_addresses = [item for item in addresses if not re.match(invalid_prefixes, item, re.I)] 34 | if valid_addresses: 35 | return valid_addresses[0] 36 | else: 37 | return default 38 | 39 | def update_hosts_file(fqdn, hostname, repl): 40 | with open("/etc/hosts", "r+") as f: 41 | hosts = f.read() 42 | pattern = re.compile("\\n+(.*{0} {1}\\n+)+".format(re.escape(fqdn), re.escape(hostname)), flags=re.M) 43 | new_hosts, n = pattern.subn(repl, hosts) 44 | if n == 0: 45 | new_hosts = hosts + repl 46 | f.seek(0) 47 | f.truncate() 48 | f.write(new_hosts) 49 | 50 | update_hosts_file(fqdn, hostname, "") 51 | ipv4 = guess_address(fqdn, hostname, socket.AF_INET, "127\\.0\\.", "127.0.1.1") 52 | 53 | if options.ipv6_is_enabled: 54 | # we explicitly exclude link-local addresses as we currently can't get the interface names 55 | ipv6 = guess_address(fqdn, hostname, socket.AF_INET6, "(::1$)|(fe[89ab][0-f]:)", "# ipv6 address not found for names:") 56 | repl = "\n\n{0} {1} {2}\n{3} {4} {5}\n".format(ipv4, fqdn, hostname, ipv6, fqdn, hostname) 57 | else: 58 | repl = "\n\n{0} {1} {2}\n".format(ipv4, fqdn, hostname) 59 | 60 | update_hosts_file(fqdn, hostname, repl) 61 | 62 | print("/etc/hosts updated.") 63 | -------------------------------------------------------------------------------- /salt/default/sshd.sls: -------------------------------------------------------------------------------- 1 | {% if 'client' in grains.get('roles') or 'minion' in grains.get('roles') or 'sshminion' in grains.get('roles') %} 2 | # WORKAROUND: Leap 15.6 and SL-Micro 6.0 are using a different sshd_config. To be reviewed.
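# (Illustration only, not part of the original state: after a highstate, the effect of the file.replace below can be checked with "grep -i '^ChallengeResponseAuthentication' /etc/ssh/sshd_config", which should print "ChallengeResponseAuthentication yes".)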
3 | {% if not ( grains['osfullname'] in ['Leap', 'SL-Micro'] and grains['osrelease'] in ['15.6', '6.0', '6.1'] ) %} 4 | sshd_change_challengeresponseauthentication: 5 | file.replace: 6 | - name: /etc/ssh/sshd_config 7 | - pattern: "^ChallengeResponseAuthentication.*" 8 | - repl: "ChallengeResponseAuthentication yes" 9 | {% endif %} 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /salt/default/testsuite.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('testsuite') | default(false, true) %} 2 | {% if 'client' in grains.get('roles') or 'minion' in grains.get('roles') or 'sshminion' in grains.get('roles') %} 3 | 4 | include: 5 | - scc 6 | - repos.testsuite 7 | 8 | {% if grains['os'] == 'SUSE' %} 9 | 10 | default_cucumber_requisites: 11 | pkg.installed: 12 | - pkgs: 13 | - andromeda-dummy 14 | - milkyway-dummy 15 | - virgo-dummy 16 | - iptables 17 | - require: 18 | - sls: repos.testsuite 19 | 20 | {% elif grains['os_family'] == 'RedHat' %} 21 | 22 | default_cucumber_requisites: 23 | pkg.installed: 24 | - pkgs: 25 | - andromeda-dummy 26 | - milkyway-dummy 27 | - virgo-dummy 28 | - require: 29 | - pkgrepo: test_repo_rpm_pool 30 | 31 | {% endif %} 32 | {% endif %} 33 | {% endif %} 34 | -------------------------------------------------------------------------------- /salt/default/time.sls: -------------------------------------------------------------------------------- 1 | {% if not grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 2 | # Dependencies already satisfied by the images 3 | # https://build.opensuse.org/project/show/systemsmanagement:sumaform:images:microos 4 | timezone_package: 5 | pkg.installed: 6 | {% if grains['os_family'] == 'Suse' %} 7 | - name: timezone 8 | {% else %} 9 | - name: tzdata 10 | {% endif %} 11 | {% endif %} 12 | 13 | timezone_symlink: 14 | file.symlink: 15 | - name: /etc/localtime 16 | - target: /usr/share/zoneinfo/{{ grains['timezone'] }} 17 | - force: true 18 | {% if not grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 19 | - require: 20 | - pkg: timezone_package 21 | {% endif %} 22 | 23 | timezone_setting: 24 | timezone.system: 25 | - name: {{ grains['timezone'] }} 26 | - utc: True 27 | - require: 28 | - file: timezone_symlink 29 | 30 | {% if grains['use_ntp'] %} 31 | 32 | {% if grains['osfullname'] == 'Leap' %} 33 | 34 | ntp_pkg: 35 | pkg.installed: 36 | - name: ntp 37 | 38 | ntp_conf_file: 39 | file.managed: 40 | - name: /etc/ntp.conf 41 | - source: salt://default/ntp.conf 42 | 43 | ntpd_enable_service: 44 | service.running: 45 | - name: ntpd 46 | - enable: true 47 | 48 | {% else %} 49 | 50 | {% if not grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 51 | # Dependencies already satisfied by SLE Micro itself 52 | chrony_pkg: 53 | pkg.installed: 54 | - name: chrony 55 | {% endif %} 56 | 57 | chrony_conf_file: 58 | file.managed: 59 | - name: /etc/chrony.conf 60 | - source: salt://default/chrony.conf 61 | 62 | chrony_enable_service: 63 | service.running: 64 | - name: chronyd 65 | - enable: true 66 | 67 | {% endif %} 68 | {% endif %} 69 | -------------------------------------------------------------------------------- /salt/default/update.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('use_os_released_updates') | default(false, true) %} 2 | {% if not grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 3 | update_packages: 4 | pkg.uptodate: 5 | - require: 6 | - sls: repos 7 | {% endif %} 
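{# Sketch, not in the original file: assuming salt-call (or venv-salt-call) is installed, this state can be applied on its own with "salt-call --local --file-root=/path/to/salt state.sls default.update", where /path/to/salt is a placeholder for the checked-out salt/ directory. #}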
8 | {% endif %} 9 | -------------------------------------------------------------------------------- /salt/first_deployment_highstate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Applies the "minimal" state in isolation to set the hostname and update Salt itself, 4 | # then applies the highstate 5 | 6 | FILE_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" 7 | 8 | # Force direct call module executors on MicroOS images 9 | MODULE_EXEC="" 10 | if grep -q "cpe:/o:.*suse:.*micro" /etc/os-release; then 11 | MODULE_EXEC="--module-executors=[direct_call]" 12 | fi 13 | 14 | if [ -x /usr/bin/venv-salt-call ]; then 15 | SALT_CALL=venv-salt-call 16 | elif [ -x /usr/bin/salt-call ]; then 17 | SALT_CALL=salt-call 18 | else 19 | echo "Error: Cannot find venv-salt-call or salt-call on the system" 20 | exit 1 21 | fi 22 | 23 | echo "Starting first call to update Salt and apply the minimal configuration" 24 | 25 | ${SALT_CALL} --local --file-root=$FILE_ROOT/ --log-level=info --out-file=/var/log/salt-deployment.log $MODULE_EXEC state.sls default.minimal ||: 26 | 27 | # Display the output from the Salt execution 28 | cat /var/log/salt-deployment.log 29 | 30 | NEXT_TRY=0 31 | until [ $NEXT_TRY -eq 10 ] || ${SALT_CALL} --local test.ping 32 | do 33 | echo "It seems neither venv-salt-call nor salt-call is available after the default.minimal state was applied. Retrying... [$NEXT_TRY]"; 34 | sleep 1; 35 | ((NEXT_TRY++)); 36 | done 37 | 38 | if [ $NEXT_TRY -eq 10 ] 39 | then 40 | echo "ERROR: Neither venv-salt-call nor salt-call is available after 10 retries"; 41 | fi 42 | 43 | echo "Applying the highstate" 44 | 45 | ${SALT_CALL} --local --file-root=$FILE_ROOT/ --log-level=info --out-file=/var/log/salt-deployment.log --retcode-passthrough --force-color $MODULE_EXEC state.highstate || exit 1 46 | 47 | # Display the output from the Salt execution 48 | cat /var/log/salt-deployment.log 49 | 50 | chmod +x ${FILE_ROOT}/highstate.sh 51 | -------------------------------------------------------------------------------- /salt/grafana/provisioning/dashboards/suse_manager.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: default 5 | orgId: 1 6 | folder: '' 7 | type: file 8 | disableDeletion: false 9 | updateIntervalSeconds: 10 # how often Grafana will scan for changed dashboards 10 | options: 11 | path: /etc/grafana/provisioning 12 | -------------------------------------------------------------------------------- /salt/grafana/provisioning/datasources/prometheus_localhost.yml: -------------------------------------------------------------------------------- 1 | # config file version 2 | apiVersion: 1 3 | 4 | deleteDatasources: 5 | - name: Prometheus on localhost 6 | orgId: 1 7 | 8 | datasources: 9 | - name: Prometheus on localhost 10 | type: prometheus 11 | access: proxy 12 | url: http://localhost:9090/ 13 | basicAuth: False 14 | isDefault: True 15 | editable: true 16 | jsonData: 17 | timeInterval: 5s 18 | -------------------------------------------------------------------------------- /salt/grafana/setup_grafana.py: -------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import base64 4 | import errno 5 | import httplib 6 | import json 7 | import socket 8 | import sys 9 | import time 10 |
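# (Editorial note, not in the original script: httplib is the Python 2 name of this module; a Python 3 port would use http.client.HTTPConnection instead.)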
11 | def do(method, connection, headers, path, body=None): 12 | connection.request(method, path, headers=headers, body=json.dumps(body)) 13 | resp = connection.getresponse() 14 | content = resp.read() 15 | 16 | if resp.status != 200: 17 | raise IOError("Unexpected HTTP status received on %s: %d" % (path, resp.status)) 18 | 19 | return json.loads(content) 20 | 21 | 22 | connection = httplib.HTTPConnection("localhost") 23 | 24 | # try to connect, multiple times if ECONNREFUSED is raised 25 | # (service is up but not ready for requests yet) 26 | for retries in range(0,10): 27 | try: 28 | connection.connect() 29 | break # connected successfully, stop retrying 30 | except socket.error as e: 31 | if e.errno != errno.ECONNREFUSED: 32 | raise e 33 | print("Connection refused, retrying...") 34 | time.sleep(1) 35 | 36 | token = base64.b64encode("admin:admin".encode("ASCII")).decode("ascii") 37 | headers = { 38 | "Authorization" : "Basic %s" % token, 39 | "Content-Type" : "application/json; charset=utf8" 40 | } 41 | 42 | do("PUT", connection, headers, "/api/org/preferences", {"homeDashboardId" : 1}) 43 | -------------------------------------------------------------------------------- /salt/highstate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Applies the highstate - assumes the minimal state was already applied 4 | FILE_ROOT="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" 5 | 6 | if [ -x /usr/bin/venv-salt-call ]; then 7 | SALT_CALL=venv-salt-call 8 | elif [ -x /usr/bin/salt-call ]; then 9 | SALT_CALL=salt-call 10 | else 11 | echo "Error: Cannot find venv-salt-call or salt-call on the system" 12 | exit 1 13 | fi 14 | 15 | ${SALT_CALL} --local --file-root=$FILE_ROOT/ --log-level=info --out-file=/var/log/salt-deployment.log --retcode-passthrough --force-color state.highstate 16 | 17 | # Display the output from the Salt execution 18 | cat /var/log/salt-deployment.log 19 | -------------------------------------------------------------------------------- /salt/jenkins/basic-security.groovy: -------------------------------------------------------------------------------- 1 | #!groovy 2 | 3 | import jenkins.model.* 4 | import hudson.security.* 5 | import static jenkins.model.Jenkins.instance as jenkins 6 | import jenkins.install.InstallState 7 | 8 | def instance = Jenkins.getInstance() 9 | def hudsonRealm = new HudsonPrivateSecurityRealm(false) 10 | String randomPassword = org.apache.commons.lang.RandomStringUtils.randomAscii(30) 11 | 12 | println "Writing the password to /var/lib/jenkins/secrets/initialAdminPassword" 13 | File file = new File("/var/lib/jenkins/secrets/initialAdminPassword") 14 | file.write randomPassword + "\n" 15 | 16 | println "Creating the user 'admin'" 17 | hudsonRealm.createAccount('admin', randomPassword) 18 | 19 | println "Enabling the integrated database authentication" 20 | instance.setSecurityRealm(hudsonRealm) 21 | def strategy = new FullControlOnceLoggedInAuthorizationStrategy() 22 | instance.setAuthorizationStrategy(strategy) 23 | 24 | if (!jenkins.installState.isSetupComplete()) { 25 | println "Skipping the Setup Wizard" 26 | InstallState.INITIAL_SETUP_COMPLETED.initializeState() 27 | } 28 | 29 | instance.setInstallState(InstallState.INITIAL_SETUP_COMPLETED) 30 | 31 | println "Saving all changes" 32 | instance.save() 33 | -------------------------------------------------------------------------------- /salt/jenkins/configure-jenkins.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | URL="http://localhost:8080" 4 | 5 | RETRIES=0
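# (Note added for clarity, not in the original script: the loop below polls Jenkins until it answers with HTTP 200, giving up after 10 attempts spaced 10 seconds apart.)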
6 | while [ "$(curl -s -o /dev/null -w %{http_code} ${URL})" != "200" -a ${RETRIES} -lt 10 ]; do 7 | sleep 10 8 | RETRIES=$((RETRIES+1)) 9 | done 10 | 11 | echo "[INFO] Downloading ${URL}/jnlpJars/jenkins-cli.jar" 12 | curl -s ${URL}/jnlpJars/jenkins-cli.jar -o /tmp/jenkins-cli.jar 13 | if [ ${?} -ne 0 ]; then 14 | echo "[ERROR] Could not download ${URL}/jnlpJars/jenkins-cli.jar" 15 | exit 1 16 | fi 17 | 18 | cli_call() { 19 | echo "[INFO] Running CLI call with arguments: ${@}" 20 | java -jar /tmp/jenkins-cli.jar -s ${URL} -auth admin:"$(cat /var/lib/jenkins/secrets/initialAdminPassword)" ${@} 21 | } 22 | 23 | # Only credentials 2.6.1 is compatible with the current LTS 2.303.3 24 | # For some reason using the URL breaks the command if it is reapplied, so be sure 25 | # to remove the credentials URL from this command if you want to reapply it 26 | cli_call install-plugin swarm https://updates.jenkins.io/download/plugins/credentials/2.6.1/credentials.hpi git git-client workflow-aggregator copyartifact extended-choice-parameter timestamper htmlpublisher rebuild http_request ansicolor greenballs -deploy -restart 27 | 28 | -------------------------------------------------------------------------------- /salt/jenkins/etc/jenkins.conf: -------------------------------------------------------------------------------- 1 | <VirtualHost *:80> 2 | ErrorLog /var/log/apache2/jenkins_error_log 3 | CustomLog /var/log/apache2/jenkins_access_log combined 4 | 5 | # don't lose time with IP address lookups 6 | HostnameLookups Off 7 | 8 | # needed for named virtual hosts 9 | UseCanonicalName Off 10 | 11 | # configures the footer on server-generated documents 12 | ServerSignature On 13 | 14 | ProxyPass / http://localhost:8080/ 15 | ProxyPassReverse / http://localhost:8080/ 16 | </VirtualHost> 17 | -------------------------------------------------------------------------------- /salt/jenkins/http_proxy.sls: -------------------------------------------------------------------------------- 1 | http_proxy: 2 | cmd.run: 3 | - name: /usr/bin/podman run -d -p 3128:3128 --restart always --name http_proxy registry.opensuse.org/systemsmanagement/uyuni/master/docker/containers/proxy 4 | - unless: podman ps -a --filter "name=http_proxy" --format json|grep '"Id"' 5 | - runas: podman 6 | - require: 7 | - sls: jenkins.podman 8 | cron.present: 9 | - name: /usr/bin/podman start http_proxy 10 | - user: podman 11 | - special: '@reboot' 12 | -------------------------------------------------------------------------------- /salt/jenkins/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - default 3 | - jenkins.configuration 4 | - jenkins.podman 5 | - jenkins.http_proxy 6 | -------------------------------------------------------------------------------- /salt/jenkins/podman.sls: -------------------------------------------------------------------------------- 1 | podman: 2 | pkg.installed: 3 | - name: podman 4 | - require: 5 | - sls: default 6 | user.present: 7 | - name: podman 8 | - fullname: podman 9 | - usergroup: true 10 | - shell: /bin/nologin 11 | - home: /var/lib/podman 12 | - system: true 13 | cmd.run: 14 | - name: usermod --add-subuids 200000-201000 --add-subgids 200000-201000 podman 15 | - user: root 16 | - require: 17 | - user: podman 18 | -------------------------------------------------------------------------------- /salt/locust/locustfile.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | from locust import HttpLocust, TaskSet, task 5 | 6 | class UserBehavior(TaskSet): 7 | def on_start(self): 8 | """ on_start is called
when a Locust client starts, before any task is scheduled """ 9 | # don't verify ssl certs 10 | self.client.verify = False 11 | self.login() 12 | 13 | def login(self): 14 | self.client.post("/", {"username": os.environ["SERVER_USERNAME"], "password": os.environ["SERVER_PASSWORD"]}) 15 | 16 | @task(1) 17 | def index(self): 18 | self.client.get("/rhn/YourRhn.do") 19 | 20 | class WebsiteUser(HttpLocust): 21 | task_set = UserBehavior 22 | # These are the minimum and maximum times respectively, in milliseconds, that a simulated user will wait between executing each task. 23 | min_wait = 5000 24 | max_wait = 9000 25 | -------------------------------------------------------------------------------- /salt/locust/run-locust.py: -------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import argparse 4 | import requests 5 | import time 6 | 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument('-c', '--clients', action='store', dest='clients', 9 | default=200, type=int, 10 | help='Number of concurrent clients') 11 | parser.add_argument('-r', '--hatch-rate', action='store', dest='hatch_rate', 12 | default=100, type=int, 13 | help='The rate per second at which clients are spawned') 14 | parser.add_argument('-t', '--swarm-time', action='store', dest='swarm_time', 15 | default=120, type=int, 16 | help='The duration of the swarm in seconds') 17 | args = parser.parse_args() 18 | 19 | LocustPayload = { 20 | 'locust_count': args.clients, 21 | 'hatch_rate': args.hatch_rate 22 | } 23 | 24 | res = requests.post('http://localhost/swarm', data=LocustPayload) 25 | print(res.json()["message"]) 26 | time.sleep(args.swarm_time) 27 | res = requests.get('http://localhost/stop') 28 | print(res.json()["message"]) 29 | -------------------------------------------------------------------------------- /salt/minion/reflector.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('use_avahi') and grains.get('avahi_reflector') %} 2 | 3 | reflector_configuration: 4 | file.replace: 5 | - name: /etc/avahi/avahi-daemon.conf 6 | - pattern: "#enable-reflector=no" 7 | - repl: "enable-reflector=yes" 8 | 9 | reflector_service: 10 | service.running: 11 | - name: avahi-daemon 12 | - enable: True 13 | - running: True 14 | - watch: 15 | - file: /etc/avahi/avahi-daemon.conf 16 | 17 | {% endif %} 18 | -------------------------------------------------------------------------------- /salt/minion/testsuite.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('testsuite') | default(false, true) %} 2 | 3 | {% if not grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 4 | # Dependencies already satisfied by the images 5 | # https://build.opensuse.org/project/show/systemsmanagement:sumaform:images:microos 6 | minion_cucumber_requisites: 7 | pkg.installed: 8 | - pkgs: 9 | {% if grains['install_salt_bundle'] %} 10 | - venv-salt-minion 11 | {% else %} 12 | - salt-minion 13 | {% endif %} 14 | # Debian-based systems don't come with curl installed; the test suite handles it with wget instead 15 | {% if grains['os_family'] == 'Debian' %} 16 | - wget 17 | {% endif %} 18 | - require: 19 | - sls: default 20 | {% endif %} 21 | 22 | {% if grains['os'] == 'SUSE' %} 23 | {% if '12' in grains['osrelease'] or '15' in grains['osrelease'] %} 24 | 25 | suse_minion_cucumber_requisites: 26 | pkg.installed: 27 | - pkgs: 28 | - aaa_base-extras 29 | - ca-certificates 30 | {% if 'build_image' not in
grains.get('product_version', '') and 'paygo' not in grains.get('product_version', '') %} 31 | - require: 32 | - sls: repos 33 | {% endif %} 34 | 35 | suse_certificate: 36 | file.managed: 37 | - name: /etc/pki/trust/anchors/SUSE_Trust_Root.crt.pem 38 | - source: salt://minion/certs/SUSE_Trust_Root.crt.pem 39 | - makedirs: True 40 | 41 | update_ca_truststore: 42 | cmd.run: 43 | - name: /usr/sbin/update-ca-certificates 44 | - onchanges: 45 | - file: suse_certificate 46 | - require: 47 | - pkg: suse_minion_cucumber_requisites 48 | - unless: 49 | - fun: service.status 50 | args: 51 | - ca-certificates.path 52 | 53 | {% endif %} 54 | {% endif %} 55 | 56 | # WORKAROUND for not syncing openSUSE Leap 15.6 in the Uyuni CIs 57 | # We need some dependencies for the package mgr-push, otherwise its installation in the test suite will fail 58 | {% if grains['osfullname'] == 'Leap' and grains['osrelease'] == '15.6' %} 59 | suse_minion_mgr_push_requisites: 60 | pkg.installed: 61 | - pkgs: 62 | - hwdata 63 | - libgudev-1_0-0 64 | - python3-dbus-python 65 | - python3-dmidecode 66 | - python3-extras 67 | - python3-hwdata 68 | - python3-libxml2 69 | - python3-pyudev 70 | - python3-rhnlib 71 | - zchunk 72 | {% endif %} 73 | 74 | {% endif %} 75 | -------------------------------------------------------------------------------- /salt/mirror/cron.sls: -------------------------------------------------------------------------------- 1 | cron: 2 | pkg.installed 3 | 4 | cron_service: 5 | service.running: 6 | - name: cron 7 | - enable: True 8 | - require: 9 | - pkg: cron 10 | 11 | minima_symlink: 12 | file.symlink: 13 | - name: /etc/cron.daily/minima.sh 14 | - target: /usr/local/bin/minima.sh 15 | - require: 16 | - pkg: cron 17 | - file: minima_script 18 | 19 | apt-mirror_symlink: 20 | file.symlink: 21 | - name: /etc/cron.daily/apt-mirror.sh 22 | - target: /usr/local/bin/apt-mirror.sh 23 | - require: 24 | - pkg: cron 25 | - file: apt-mirror_script 26 | 27 | mirror-images_symlink: 28 | file.symlink: 29 | - name: /etc/cron.daily/mirror-images.sh 30 | - target: /usr/local/bin/mirror-images.sh 31 | - require: 32 | - pkg: cron 33 | - file: mirror-images_script 34 | 35 | scc-data_symlink: 36 | file.symlink: 37 | - name: /etc/cron.daily/scc-data.sh 38 | - target: /usr/local/bin/scc-data.sh 39 | - require: 40 | - pkg: cron 41 | - file: scc-data_script 42 | 43 | # no symlink by default for docker-images.sh 44 | # (docker is not installed by default) 45 | -------------------------------------------------------------------------------- /salt/mirror/cron_scripts/apt-mirror.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | /usr/bin/date >> /var/log/apt-mirror.log 4 | echo '---------------------------------------------------------' >> /var/log/apt-mirror.log 5 | (apt-mirror 2>&1) >> /var/log/apt-mirror.log 6 | (bash /var/spool/apt-mirror/var/clean.sh 2>&1) >> /var/log/apt-mirror.log 7 | echo >> /var/log/apt-mirror.log 8 | -------------------------------------------------------------------------------- /salt/mirror/cron_scripts/docker-images.sh: -------------------------------------------------------------------------------- 1 | #!
/bin/bash 2 | 3 | /usr/bin/date >> /var/log/docker-images.log 4 | echo '---------------------------------------------------------' >> /var/log/docker-images.log 5 | (/usr/local/bin/utils/docker_images /etc/docker-images.conf 2>&1) >> /var/log/docker-images.log 6 | echo >> /var/log/docker-images.log 7 | -------------------------------------------------------------------------------- /salt/mirror/cron_scripts/minima.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | /usr/bin/date >> /var/log/minima.log 4 | echo '---------------------------------------------------------' >> /var/log/minima.log 5 | (/usr/local/bin/utils/minima sync -c /etc/minima.yaml 2>&1) >> /var/log/minima.log 6 | (/usr/local/bin/utils/jdupes --linkhard -r -s /srv/mirror/ 2>&1) >> /var/log/minima.log 7 | (/usr/local/bin/utils/adjust_external_repos 2>&1) >> /var/log/minima.log 8 | echo >> /var/log/minima.log 9 | -------------------------------------------------------------------------------- /salt/mirror/cron_scripts/mirror-images.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | /usr/bin/date >> /var/log/mirror-images.log 4 | echo '---------------------------------------------------------' >> /var/log/mirror-images.log 5 | cd /srv/mirror 6 | for IMAGE in $(cat /etc/mirror-images.conf); do 7 | echo "${IMAGE}:" >> /var/log/mirror-images.log 8 | (wget --mirror --no-host-directories ${IMAGE} 2>&1) >> /var/log/mirror-images.log 9 | echo >> /var/log/mirror-images.log 10 | done 11 | echo >> /var/log/mirror-images.log 12 | -------------------------------------------------------------------------------- /salt/mirror/cron_scripts/scc-data.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | 3 | /usr/bin/date >> /var/log/scc-data.log 4 | echo '---------------------------------------------------------' >> /var/log/scc-data.log 5 | cd /srv/mirror 6 | SCC_CREDS="$(cat /etc/scc-data.conf)" 7 | (/usr/local/bin/utils/refresh_scc_data.py $SCC_CREDS 2>&1) >> /var/log/scc-data.log 8 | echo >> /var/log/scc-data.log 9 | -------------------------------------------------------------------------------- /salt/mirror/etc/apt-mirror.list: -------------------------------------------------------------------------------- 1 | 2 | set mirror_path /srv/mirror/ 3 | set defaultarch amd64 4 | set nthreads 20 5 | 6 | ## 7 | ## Sources 8 | ## 9 | 10 | {% set ubuntu_names = {'20.04': 'focal', '22.04': 'jammy', '24.04': 'noble'} %} 11 | 12 | {% for distro in grains['ubuntu_distros']|default([], true) %} 13 | {% set distro_name = ubuntu_names.get(distro, distro) %} 14 | deb http://archive.ubuntu.com/ubuntu {{distro_name}} main 15 | deb http://archive.ubuntu.com/ubuntu {{distro_name}}-updates main 16 | deb http://archive.ubuntu.com/ubuntu {{distro_name}}-security main 17 | 18 | {% endfor %} 19 | 20 | clean http://archive.ubuntu.com/ubuntu 21 | -------------------------------------------------------------------------------- /salt/mirror/etc/docker-images.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/uyuni-project/sumaform/e5158b05d93232bb2c14f2863c4581fa2c1bf1e1/salt/mirror/etc/docker-images.conf -------------------------------------------------------------------------------- /salt/mirror/etc/mirror-images.conf: -------------------------------------------------------------------------------- 1 | https://github.com/uyuni-project/sumaform-images/releases/download/4.3.0/centos7.qcow2 2 | http://dist.nue.suse.com/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles15sp2.x86_64.qcow2 3 | http://dist.nue.suse.com/ibs/Devel:/Galaxy:/Terraform:/Images/images/sles12sp4.x86_64.qcow2 4 | http://cloud.centos.org/centos/6/images/CentOS-6-x86_64-GenericCloud.qcow2 5 | http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 6 | http://cloud.centos.org/centos/8/x86_64/images/CentOS-8-GenericCloud-8.1.1911-20200113.3.x86_64.qcow2 7 | http://download.opensuse.org/distribution/leap/15.2/appliances/openSUSE-Leap-15.2-JeOS.x86_64-OpenStack-Cloud.qcow2 8 | http://dist.nue.suse.com/install/SLE-15-SP2-JeOS-GM/SLES15-SP2-JeOS.x86_64-15.2-OpenStack-Cloud-GM.qcow2 9 | https://schnell.suse.de/SLE12/SLE-12-SP4-JeOS-GM/SLES12-SP4-JeOS.x86_64-12.4-OpenStack-Cloud-GM.qcow2 10 | http://dist.nue.suse.com/install/SLE-12-SP5-JeOS-GM/SLES12-SP5-JeOS.x86_64-12.5-OpenStack-Cloud-GM.qcow2 11 | http://dist.nue.suse.com/install/SLE-15-SP3-JeOS-GM/SLES15-SP3-JeOS.x86_64-15.3-OpenStack-Cloud-GM.qcow2 12 | http://dist.nue.suse.com/install/SLE-15-SP4-Minimal-Snapshot-202205-1/SLES15-SP4-Minimal-VM.x86_64-OpenStack-Cloud-Snapshot-202205-1.qcow2 13 | http://dist.nue.suse.com/install/SLE-15-SP6-Minimal-GM/SLES15-SP6-Minimal-VM.x86_64-Cloud-Build6.73.qcow2 14 | http://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img 15 | http://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img 16 | http://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-amd64.img 17 | http://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img 18 | http://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img 19 | -------------------------------------------------------------------------------- 
/salt/mirror/etc/mirror.conf: -------------------------------------------------------------------------------- 1 | <VirtualHost *:80> 2 | DocumentRoot /srv/mirror 3 | 4 | ErrorLog /var/log/apache2/mirror_error_log 5 | CustomLog /var/log/apache2/mirror_access_log combined 6 | 7 | # don't lose time with IP address lookups 8 | HostnameLookups Off 9 | 10 | # needed for named virtual hosts 11 | UseCanonicalName Off 12 | 13 | # configures the footer on server-generated documents 14 | ServerSignature On 15 | 16 | <Directory /srv/mirror> 17 | Options Indexes FollowSymLinks 18 | AllowOverride None 19 | Require all granted 20 | </Directory> 21 | </VirtualHost> 22 | 23 | -------------------------------------------------------------------------------- /salt/mirror/etc/scc-data.conf: -------------------------------------------------------------------------------- 1 | {{ grains.get('cc_username') }}:{{ grains.get('cc_password') }} 2 | -------------------------------------------------------------------------------- /salt/mirror/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - default 3 | - mirror.configuration 4 | {% if not grains.get('disable_cron') %} 5 | - mirror.cron 6 | {% endif %} 7 | 8 | {% if grains.get('synchronize_immediately') %} 9 | synchronize_http_repositories: 10 | cmd.run: 11 | - name: bash /usr/local/bin/minima.sh 12 | {% endif %} 13 | -------------------------------------------------------------------------------- /salt/mirror/utils/adjust_external_repos: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # External repos such as the NVIDIA drivers need this to emulate RMT, 4 | # which is what the Uyuni/SUSE Manager server expects 5 | 6 | ERROR=0 7 | 8 | if [ -d suse -a -d repo ]; then 9 | mkdir -p repo/RPMMD || exit ${?} 10 | for dir in suse/sle* ; do 11 | d=${dir^^} 12 | ver=${d/SUSE\/SLE/SLE-} 13 | if [[ $ver == ${ver%SP*} ]]; then 14 | target="${ver}-GA" 15 | else 16 | target=${ver/SP/-SP} 17 | fi 18 | full_path=repo/RPMMD/${target}-Desktop-NVIDIA-Driver 19 | if [ ! -L ${full_path} ]; then 20 | if ! ln -s ../../$dir ${full_path}; then 21 | ERROR=1 22 | fi 23 | fi 24 | done 25 | fi 26 | 27 | exit $ERROR 28 | -------------------------------------------------------------------------------- /salt/mirror/utils/docker_images: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DOCKER_IMAGES_CONF="$1" 4 | [ -z "$DOCKER_IMAGES_CONF" ] && exit 1 5 | DOCKER_IMAGE_NAMES=$(cat "$DOCKER_IMAGES_CONF") 6 | 7 | original_registry="registry.mgr.suse.de" 8 | target_acr=$(hostname -f) 9 | grep_filter="none" 10 | 11 | for original_image in $DOCKER_IMAGE_NAMES; do 12 | # Download all images 13 | docker pull $original_registry/$original_image 14 |
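# (Hypothetical illustration, not part of the original script: for an image named foo with tag 1.0 and a mirror host mirror.example.com, the --format template below emits the line "docker tag registry.mgr.suse.de/foo:1.0 mirror.example.com/foo:1.0 | docker push mirror.example.com/foo:1.0", which is then filtered by grep and executed by bash.)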
15 | # List every locally pulled tag of the image and 16 | # format the output to be: 17 | # docker tag ORIGINAL_IMAGE_NAME:VERSION TARGET_IMAGE_NAME:VERSION | 18 | # docker push TARGET_IMAGE_NAME:VERSION 19 | # then filter the result, removing any entries containing words defined in $grep_filter (e.g. rc, beta, alpha, etc.) 20 | # finally, execute those as commands 21 | docker images $original_registry/$original_image \ 22 | --format "docker tag {{.Repository}}:{{.Tag}} $target_acr/$original_image:{{.Tag}} | docker push $target_acr/$original_image:{{.Tag}}" | 23 | grep -vE $grep_filter | 24 | bash -x 25 | done 26 | 27 | target_acr=$(hostname -f):5000/cucutest 28 | grep_filter="none" 29 | 30 | docker login --username="cucutest" --password="cucusecret" $target_acr 31 | 32 | for original_image in $DOCKER_IMAGE_NAMES; do 33 | # Download all images 34 | docker pull $original_registry/$original_image 35 | 36 | # List every locally pulled tag of the image and 37 | # format the output to be: 38 | # docker tag ORIGINAL_IMAGE_NAME:VERSION TARGET_IMAGE_NAME:VERSION | 39 | # docker push TARGET_IMAGE_NAME:VERSION 40 | # then filter the result, removing any entries containing words defined in $grep_filter (e.g. rc, beta, alpha, etc.) 41 | # finally, execute those as commands 42 | docker images $original_registry/$original_image \ 43 | --format "docker tag {{.Repository}}:{{.Tag}} $target_acr/$original_image:{{.Tag}} | docker push $target_acr/$original_image:{{.Tag}}" | 44 | grep -vE $grep_filter | 45 | bash -x 46 | done 47 | 48 | docker logout $target_acr 49 | -------------------------------------------------------------------------------- /salt/post_provisioning_cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # If we are using the Salt Bundle (venv-salt-minion), then we need to remove 4 | # the original Salt package installed on the instance. 5 | 6 | if [ -x /usr/bin/venv-salt-call ]; then 7 | SALT_CALL=venv-salt-call 8 | elif [ -x /usr/bin/salt-call ]; then 9 | SALT_CALL=salt-call 10 | else 11 | echo "Error: Cannot find venv-salt-call or salt-call on the system" 12 | exit 1 13 | fi 14 | 15 | # Nothing to do if the "install_salt_bundle" grain is not true 16 | INSTALL_SALT_BUNDLE=$(${SALT_CALL} --local --log-level=quiet --output=txt grains.get install_salt_bundle) 17 | 18 | if [[ "$INSTALL_SALT_BUNDLE" != "local: True" ]]; then 19 | exit 0 20 | fi 21 | 22 | echo "This instance is configured to use the Salt Bundle in its grains!" 23 | 24 | if [ -x /usr/bin/dnf ]; then 25 | INSTALLER=yum 26 | elif [ -x /usr/bin/zypper ]; then 27 | INSTALLER=zypper 28 | elif [ -x /usr/bin/yum ]; then 29 | INSTALLER=yum 30 | elif [ -x /usr/bin/apt ]; then 31 | INSTALLER=apt 32 | fi 33 | 34 | echo "Removing Salt packages, except Salt Bundle (venv-salt-minion) ..." 35 | if [[ "$INSTALLER" == "zypper" ]]; then 36 | zypper -q --non-interactive remove salt-minion > /dev/null 2>&1 ||: 37 | elif [[ "$INSTALLER" == "yum" ]]; then 38 | yum -y remove salt salt-minion python3-salt python2-salt > /dev/null 2>&1 ||: 39 | elif [[ "$INSTALLER" == "apt" ]]; then 40 | apt-get --yes purge salt-common > /dev/null 2>&1 ||: 41 | fi 42 | 43 | echo "Done!"
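# (Hypothetical verification, not part of the original script: after the cleanup, "command -v venv-salt-call" should still succeed, while "rpm -q salt-minion" on RPM-based systems should report the classic package as not installed.)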
44 | -------------------------------------------------------------------------------- /salt/proxy/additional_disk.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - default 3 | 4 | {% if grains.get('repository_disk_size') > 0 %} 5 | 6 | parted: 7 | pkg.installed 8 | 9 | spacewalk_partition: 10 | cmd.run: 11 | - name: /usr/sbin/parted -s /dev/{{grains['data_disk_device']}} mklabel gpt && /usr/sbin/parted -s /dev/{{grains['data_disk_device']}} mkpart primary 0% 100% && sleep 1 && /sbin/mkfs.ext4 /dev/{{grains['data_disk_device']}}1 12 | - unless: ls /dev/{{grains['data_disk_device']}}1 13 | - require: 14 | - pkg: parted 15 | 16 | spacewalk_directory: 17 | file.directory: 18 | - name: /var/spacewalk 19 | - makedirs: True 20 | mount.mounted: 21 | - name: /var/spacewalk 22 | - device: /dev/{{grains['data_disk_device']}}1 23 | - fstype: ext4 24 | - mkmnt: True 25 | - persist: True 26 | - opts: 27 | - defaults 28 | - require: 29 | - cmd: spacewalk_partition 30 | 31 | {% endif %} 32 | -------------------------------------------------------------------------------- /salt/proxy/config-answers.txt: -------------------------------------------------------------------------------- 1 | RHN_PARENT={{grains['server']}} 2 | HTTP_PROXY='' 3 | TRACEBACK_EMAIL='' 4 | USE_SSL=Y/n 5 | USE_EXISTING_CERTS=y/N 6 | INSTALL_MONITORING=n 7 | SSL_ORG=SUSE 8 | SSL_ORGUNIT=SUSE 9 | SSL_COMMON={{grains['hostname']}}.{{grains['domain']}} 10 | SSL_CITY=Nuremberg 11 | SSL_STATE=Bayern 12 | SSL_COUNTRY=DE 13 | SSL_EMAIL=galaxy-noise@suse.de 14 | SSL_CNAME_ASK='' 15 | POPULATE_CONFIG_CHANNEL=Y/n 16 | RHN_USER={{ grains.get('server_username') | default('admin', true) }} 17 | ACTIVATE_SLP=Y/n 18 | -------------------------------------------------------------------------------- /salt/proxy_containerized/config: -------------------------------------------------------------------------------- 1 | Host * 2 | StrictHostKeyChecking no 3 | UserKnownHostsFile=/dev/null 4 | -------------------------------------------------------------------------------- /salt/proxy_containerized/id_ed25519: -------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW 3 | QyNTUxOQAAACBhDwPFL2f+rU4i/jChlALBybj5Q6bnng5m7iftIIjgmAAAAJAqWvyhKlr8 4 | oQAAAAtzc2gtZWQyNTUxOQAAACBhDwPFL2f+rU4i/jChlALBybj5Q6bnng5m7iftIIjgmA 5 | AAAEAbj/tZzeu2JuFmYIwVkjOvK+mdUFlnsGK0AluVz9V3A2EPA8UvZ/6tTiL+MKGUAsHJ 6 | uPlDpueeDmbuJ+0giOCYAAAAC3Bhb2xvQGxpbnV4AQI= 7 | -----END OPENSSH PRIVATE KEY----- 8 | -------------------------------------------------------------------------------- /salt/proxy_containerized/id_ed25519.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGEPA8UvZ/6tTiL+MKGUAsHJuPlDpueeDmbuJ+0giOCY root@controller 2 | -------------------------------------------------------------------------------- /salt/registry/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - default 3 | 4 | podman: 5 | pkg.installed: 6 | - require: 7 | - sls: default 8 | 9 | registry_directory: 10 | file.directory: 11 | - name: /var/lib/registry 12 | 13 | # generated via 14 | # podman create --name registry --publish 80:5000 -v /var/lib/registry:/var/lib/registry docker.io/library/registry:2 15 | # podman generate systemd --name --new registry 16 | # replace KillMode=none with
TimeoutStopSec=60 as per https://github.com/containers/podman/pull/8889 17 | 18 | registry_service: 19 | file.managed: 20 | - name: /etc/systemd/system/registry.service 21 | - contents: | 22 | [Unit] 23 | Description=Podman container-registry.service 24 | Documentation=man:podman-generate-systemd(1) 25 | Wants=network.target 26 | After=network-online.target 27 | 28 | [Service] 29 | Environment=PODMAN_SYSTEMD_UNIT=%n 30 | Restart=on-failure 31 | ExecStartPre=/bin/rm -f %t/container-registry.pid %t/container-registry.ctr-id 32 | ExecStart=/usr/bin/podman run --conmon-pidfile %t/container-registry.pid --cidfile %t/container-registry.ctr-id --cgroups=no-conmon -d --replace --name registry --publish 80:5000 -v /var/lib/registry:/var/lib/registry docker.io/library/registry:2 33 | ExecStop=/usr/bin/podman stop --ignore --cidfile %t/container-registry.ctr-id -t 10 34 | ExecStopPost=/usr/bin/podman rm --ignore -f --cidfile %t/container-registry.ctr-id 35 | PIDFile=%t/container-registry.pid 36 | TimeoutStopSec=60 37 | Type=forking 38 | 39 | [Install] 40 | WantedBy=multi-user.target default.target 41 | service.running: 42 | - name: registry 43 | - enable: True 44 | -------------------------------------------------------------------------------- /salt/repos/disable_local.sls: -------------------------------------------------------------------------------- 1 | {% if 'paygo' not in grains.get('product_version') | default('', true) %} 2 | disable_all_local_repos: 3 | cmd.run: 4 | - name: zypper mr -d --all 5 | - onlyif: test -x /usr/bin/zypper 6 | {% endif %} 7 | -------------------------------------------------------------------------------- /salt/repos/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - repos.disable_local 3 | - repos.vendor 4 | {% if not grains.get('additional_repos_only') %} 5 | - repos.default_settings 6 | - repos.os 7 | - repos.client_tools 8 | - repos.minion 9 | - repos.proxy 10 | - repos.proxy_containerized 11 | - repos.server 12 | - repos.server_containerized 13 | - repos.build_host 14 | - repos.virthost 15 | - repos.testsuite 16 | - repos.tools 17 | - repos.jenkins 18 | - repos.ruby 19 | {% endif %} 20 | - repos.additional 21 | 22 | {% if grains['os'] == 'SUSE' and grains['osfullname'] not in ['SLE Micro', 'SL-Micro'] %} 23 | refresh_repos: 24 | cmd.run: 25 | - name: zypper --non-interactive --gpg-auto-import-keys refresh --force; exit 0 26 | {% endif %} 27 | 28 | # WORKAROUND: see github:saltstack/salt#10852 29 | {{ sls }}_nop: 30 | test.nop: [] 31 | -------------------------------------------------------------------------------- /salt/repos/jenkins.sls: -------------------------------------------------------------------------------- 1 | {% if 'jenkins' in grains.get('roles') %} 2 | {% if grains['os'] == 'SUSE' %} 3 | {% if grains['osfullname'] == 'Leap' %} 4 | {% set repo = 'openSUSE_Leap_' + grains['osrelease'] %} 5 | {% elif grains['osfullname'] == 'SLES' %} 6 | {% set slemajorver = grains['osrelease'].split('.')[0] %} 7 | {% set slesp = grains['osrelease'].split('.')[1] %} 8 | {% if slesp == '0' %} 9 | {% set slever = 'SLE_' + slemajorver %} 10 | {% else %} 11 | {% set slever = 'SLE_' + slemajorver + '_' + slesp %} 12 | {% endif %} 13 | {% endif %} 14 | {% endif %} 15 | jenkins_repo: 16 | pkgrepo.managed: 17 | - baseurl: http://{{ grains.get("mirror") | default("download.opensuse.org/", true) }}/repositories/devel:/tools:/building/{{ repo }} 18 | - refresh: True 19 | - gpgcheck: 1 20 | - gpgkey: http://{{ 
grains.get("mirror") | default("download.opensuse.org/", true) }}/repositories/devel:/tools:/building//{{ repo }}/repodata/repomd.xml.key 21 | {% endif %} 22 | 23 | -------------------------------------------------------------------------------- /salt/repos/minion.sls: -------------------------------------------------------------------------------- 1 | {% if 'minion' in grains.get('roles') and grains.get('testsuite') | default(false, true) and grains['osfullname'] == 'SLES' and not grains.get('sles_registration_code') %} 2 | 3 | {% if '15' in grains['osrelease'] %} 4 | 5 | {% if grains['osrelease'] == '15.2' %} 6 | {% set sle_version_path = '15-SP2' %} 7 | {% elif grains['osrelease'] == '15.3' %} 8 | {% set sle_version_path = '15-SP3' %} 9 | {% elif grains['osrelease'] == '15.4' %} 10 | {% set sle_version_path = '15-SP4' %} 11 | {% endif %} 12 | 13 | {% endif %} 14 | 15 | 16 | {% endif %} 17 | 18 | # WORKAROUND: see github:saltstack/salt#10852 19 | default_nop: 20 | test.nop: [] 21 | -------------------------------------------------------------------------------- /salt/repos/proxy.sls: -------------------------------------------------------------------------------- 1 | {% if 'proxy' in grains.get('roles') %} 2 | include: 3 | {%- if '4.3' in grains['product_version'] %} 4 | - repos.proxy43 5 | {%- else %} 6 | # Non-podman version deprecated in September 2024: 7 | - repos.proxyUyuni 8 | {%- endif %} 9 | 10 | {% endif %} 11 | 12 | # WORKAROUND: see github:saltstack/salt#10852 13 | {{ sls }}_nop: 14 | test.nop: [] 15 | -------------------------------------------------------------------------------- /salt/repos/proxyUyuni.sls: -------------------------------------------------------------------------------- 1 | {% if 'proxy' in grains.get('roles') %} 2 | 3 | {% if 'uyuni' in grains['product_version'] %} 4 | proxy_pool_repo: 5 | pkgrepo.managed: 6 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/ 7 | - refresh: True 8 | - priority: 97 9 | {% endif %} 10 | 11 | {% if 'uyuni-master' in grains.get('product_version') or 'uyuni-pr' in grains.get('product_version') or '4.3-pr' in grains.get('product_version') %} 12 | proxy_devel_repo: 13 | pkgrepo.managed: 14 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Proxy-POOL-x86_64-Media1/ 15 | - refresh: True 16 | - priority: 96 17 | 18 | testing_overlay_devel_repo: 19 | pkgrepo.managed: 20 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Testing-Overlay-POOL-x86_64-Media1/ 21 | - refresh: True 22 | - priority: 96 23 | 24 | {% endif %} 25 | 26 | {% endif %} 27 | 28 | # WORKAROUND: see github:saltstack/salt#10852 29 | {{ sls }}_nop: 30 | test.nop: [] 31 | -------------------------------------------------------------------------------- /salt/repos/proxy_containerized.sls: -------------------------------------------------------------------------------- 1 | {% if 'proxy_containerized' in grains.get('roles') %} 2 | 3 | include: 4 | {% if '5.0' in grains.get('product_version') %} 5 | - repos.proxy_containerized50 6 | {% elif '5.1' in grains.get('product_version') %} 7 | - repos.proxy_containerized51 8 | {%- elif 'head' in grains['product_version'] %} 9 | - repos.proxy_containerizedHead 10 | {%- else %} 11 | - 
repos.proxy_containerizedUyuni 12 | {%- endif %} 13 | 14 | {% endif %} 15 | 16 | # WORKAROUND: see github:saltstack/salt#10852 17 | {{ sls }}_nop: 18 | test.nop: [] 19 | -------------------------------------------------------------------------------- /salt/repos/proxy_containerizedHead.sls: -------------------------------------------------------------------------------- 1 | {% if 'proxy_containerized' in grains.get('roles') %} 2 | 3 | {% if grains.get("os") == 'SUSE' %} 4 | {% if grains['osfullname'] == 'SL-Micro' %} 5 | 6 | 7 | # Commented out because we already add this repo in cloud-init: 8 | # proxy_devel_repo: 9 | # pkgrepo.managed: 10 | # - baseurl: http://{{ grains.get("mirror") | default("dist.nue.suse.com", true) }}/ibs/Devel:/Galaxy:/Manager:/Head/images/repo/SUSE-Manager-Proxy-5.1-POOL-x86_64-Media1/ 11 | # - refresh: True 12 | # - gpgkey: http://{{ grains.get("mirror") | default("dist.nue.suse.com", true) }}/ibs/Devel:/Galaxy:/Manager:/Head/images/repo/SUSE-Manager-Proxy-5.1-POOL-x86_64-Media1/repodata/repomd.xml.key 13 | 14 | {% endif %} 15 | {% endif %} 16 | {% endif %} 17 | 18 | # WORKAROUND: see github:saltstack/salt#10852 19 | {{ sls }}_nop: 20 | test.nop: [] 21 | -------------------------------------------------------------------------------- /salt/repos/ruby.sls: -------------------------------------------------------------------------------- 1 | {% if grains['os'] == 'SUSE' and ('controller' in grains.get('roles')) %} 2 | 3 | ruby_add_devel_repository: 4 | pkgrepo.managed: 5 | - name: ruby_devel 6 | - baseurl: http://download.opensuse.org/repositories/devel:/languages:/ruby/15.6/ 7 | - refresh: True 8 | - gpgautoimport: True 9 | 10 | ruby_gems_add_devel_repository: 11 | pkgrepo.managed: 12 | - name: ruby_devel_extensions 13 | - baseurl: http://download.opensuse.org/repositories/devel:/languages:/ruby:/extensions/15.6/ 14 | - refresh: True 15 | - gpgautoimport: True 16 | 17 | {% endif %} 18 | 19 | # WORKAROUND: see github:saltstack/salt#10852 20 | {{ sls }}_nop: 21 | test.nop: [] 22 | -------------------------------------------------------------------------------- /salt/repos/server.sls: -------------------------------------------------------------------------------- 1 | {% if 'server' in grains.get('roles') %} 2 | 3 | include: 4 | {%- if '4.3' in grains['product_version'] %} 5 | - repos.server43 6 | {%- else %} 7 | # Non-podman version deprecated in September 2024: 8 | - repos.serverUyuni 9 | {%- endif %} 10 | 11 | {% endif %} 12 | 13 | # WORKAROUND: see github:saltstack/salt#10852 14 | {{ sls }}_nop: 15 | test.nop: [] 16 | -------------------------------------------------------------------------------- /salt/repos/serverUyuni.sls: -------------------------------------------------------------------------------- 1 | {% if 'server' in grains.get('roles') %} 2 | 3 | {% if 'uyuni' in grains['product_version'] %} 4 | server_pool_repo: 5 | pkgrepo.managed: 6 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Stable/images/repo/Uyuni-Server-POOL-x86_64-Media1/ 7 | - refresh: True 8 | - priority: 97 9 | {% endif %} 10 | 11 | {% if 'uyuni-master' in grains.get('product_version') %} 12 | server_devel_repo: 13 | pkgrepo.managed: 14 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Uyuni-Server-POOL-x86_64-Media1/ 15 | - refresh: True 16 | - priority: 96 17 | 18 | testing_overlay_devel_repo: 19 | 
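# (Clarification, not in the original file: zypper treats lower priority numbers as stronger, so the Master devel repos at priority 96 take precedence over the Stable pool repo at priority 97; the zypper default is 99.)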
pkgrepo.managed: 20 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Master/images/repo/Testing-Overlay-POOL-x86_64-Media1/ 21 | - refresh: True 22 | - priority: 96 23 | 24 | {% endif %} 25 | 26 | {% endif %} 27 | 28 | # WORKAROUND: see github:saltstack/salt#10852 29 | {{ sls }}_nop: 30 | test.nop: [] 31 | -------------------------------------------------------------------------------- /salt/repos/server_containerized.sls: -------------------------------------------------------------------------------- 1 | {% if 'server_containerized' in grains.get('roles') %} 2 | 3 | include: 4 | {% if '5.0' in grains.get('product_version') %} 5 | - repos.server_containerized50 6 | {% elif '5.1' in grains.get('product_version') %} 7 | - repos.server_containerized51 8 | {%- elif 'head' in grains['product_version'] %} 9 | - repos.server_containerizedHead 10 | {%- else %} 11 | - repos.server_containerizedUyuni 12 | {%- endif %} 13 | 14 | {% endif %} 15 | 16 | # WORKAROUND: see github:saltstack/salt#10852 17 | {{ sls }}_nop: 18 | test.nop: [] 19 | -------------------------------------------------------------------------------- /salt/repos/server_containerizedHead.sls: -------------------------------------------------------------------------------- 1 | {% if 'server_containerized' in grains.get('roles') %} 2 | 3 | {% if grains.get("os") == 'SUSE' %} 4 | {% if grains['osfullname'] == 'SL-Micro' %} 5 | 6 | 7 | # Commented out because we already add this repo in cloud-init: 8 | # server_devel_repo: 9 | # pkgrepo.managed: 10 | # - baseurl: http://{{ grains.get("mirror") | default("dist.nue.suse.com", true) }}/ibs/Devel:/Galaxy:/Manager:/Head/images/repo/SUSE-Manager-Server-5.1-POOL-x86_64-Media1/ 11 | # - refresh: True 12 | # - gpgkey: http://{{ grains.get("mirror") | default("dist.nue.suse.com", true) }}/ibs/Devel:/Galaxy:/Manager:/Head/images/repo/SUSE-Manager-Server-5.1-POOL-x86_64-Media1/repodata/repomd.xml.key 13 | 14 | {% endif %} 15 | {% endif %} 16 | {% endif %} 17 | 18 | 19 | # WORKAROUND: see github:saltstack/salt#10852 20 | {{ sls }}_nop: 21 | test.nop: [] 22 | 23 | -------------------------------------------------------------------------------- /salt/repos/testsuite.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('testsuite') | default(false, true) %} 2 | {% if 'client' in grains.get('roles') or 'minion' in grains.get('roles') or 'sshminion' in grains.get('roles') %} 3 | 4 | {% if (grains['os'] == 'SUSE') or (grains['os_family'] == 'RedHat') %} 5 | 6 | uyuni_key_for_fake_packages: 7 | {% if not grains['osfullname'] in ['SLE Micro', 'SL-Micro'] %} 8 | file.managed: 9 | - name: /tmp/uyuni.key 10 | - source: salt://default/gpg_keys/uyuni.key 11 | cmd.wait: 12 | - name: rpm --import /tmp/uyuni.key 13 | - watch: 14 | - file: uyuni_key_for_fake_packages 15 | {% else %} 16 | cmd.run: 17 | - name: transactional-update -c run rpm --import http://{{ grains.get("mirror") | default("minima-mirror-ci-bv.mgr.prv.suse.net", true) }}/uyuni.key 18 | {% endif %} 19 | 20 | {% if not (grains['osfullname'] in ['SLE-Micro', 'SL-Micro', 'openSUSE Leap Micro']) %} 21 | test_repo_rpm_pool: 22 | pkgrepo.managed: 23 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Test-Packages:/Pool/rpm/ 24 | - refresh: True 25 | - gpgcheck: 1 26 | - gpgkey: http://{{ grains.get("mirror") | 
default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Test-Packages:/Pool/rpm/repodata/repomd.xml.key 27 | {% endif %} {# already added via combustion #} 28 | 29 | {% elif grains['os_family'] == 'Debian' %} 30 | 31 | test_repo_deb_pool: 32 | pkgrepo.managed: 33 | - name: deb http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Test-Packages:/Pool/deb/ / 34 | - refresh: True 35 | - file: /etc/apt/sources.list.d/test_repo_deb_pool.list 36 | - key_url: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/Uyuni:/Test-Packages:/Pool/deb/Release.key 37 | 38 | {% endif %} 39 | {% endif %} 40 | {% endif %} 41 | 42 | # WORKAROUND: see github:saltstack/salt#10852 43 | {{ sls }}_nop: 44 | test.nop: [] 45 | -------------------------------------------------------------------------------- /salt/repos/tools.sls: -------------------------------------------------------------------------------- 1 | {% if grains['os'] == 'SUSE' and ( 2 | 'controller' in grains.get('roles') or 3 | 'grafana' in grains.get('roles') or 4 | 'mirror' in grains.get('roles') or 5 | grains.get('evil_minion_count') or 6 | grains.get('monitored') 7 | ) %} 8 | 9 | {% if grains['osfullname'] == 'Leap' %} 10 | {% set path = 'openSUSE_Leap_' + grains['osrelease'] %} 11 | {% endif %} 12 | 13 | {% if grains['osfullname'] != 'Leap' %} 14 | {% if grains['osrelease'] == '12.5' %} 15 | {% set path = 'SLE_12_SP5' %} 16 | {% elif grains['osrelease'] == '15.2' %} 17 | {% set path = 'SLE_15_SP2' %} 18 | {% elif grains['osrelease'] == '15.3' %} 19 | {% set path = 'SLE_15_SP3' %} 20 | {% elif grains['osrelease'] == '15.4' %} 21 | {% set path = 'SLE_15_SP4' %} 22 | {% elif grains['osrelease'] == '15.5' %} 23 | {% set path = 'SLE_15_SP5' %} 24 | {% elif grains['osrelease'] == '15.6' %} 25 | {% set path = 'SLE_15_SP6' %} 26 | {% endif %} 27 | {% endif %} 28 | 29 | tools_repo: 30 | pkgrepo.managed: 31 | - baseurl: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/sumaform:/tools/{{path}}/ 32 | - refresh: True 33 | - gpgcheck: 1 34 | - gpgkey: http://{{ grains.get("mirror") | default("downloadcontent.opensuse.org", true) }}/repositories/systemsmanagement:/sumaform:/tools/{{path}}/repodata/repomd.xml.key 35 | 36 | {% endif %} 37 | 38 | # WORKAROUND: see github:saltstack/salt#10852 39 | {{ sls }}_nop: 40 | test.nop: [] 41 | -------------------------------------------------------------------------------- /salt/repos/vendor.sls: -------------------------------------------------------------------------------- 1 | allow_vendor_changes: 2 | {% if grains['osfullname'] == 'Leap' %} 3 | file.managed: 4 | - name: /etc/zypp/vendors.d/opensuse 5 | - makedirs: True 6 | - contents: | 7 | [main] 8 | vendors = openSUSE,openSUSE Build Service,obs://build.suse.de/Devel:Galaxy,obs://build.opensuse.org 9 | {% else %} 10 | file.managed: 11 | - name: /etc/zypp/vendors.d/suse 12 | - makedirs: True 13 | - contents: | 14 | [main] 15 | vendors = SUSE,openSUSE Build Service,obs://build.suse.de/Devel:Galaxy,obs://build.opensuse.org 16 | {% endif %} 17 | 18 | -------------------------------------------------------------------------------- /salt/repos/virthost.sls: -------------------------------------------------------------------------------- 1 | {% if 'virthost' in grains.get('roles') %} 2 | 3 | {% if grains['osfullname'] == 'SLES' %} 4 | 5 | {% if 
grains['osrelease'] == '15.2' %} 6 | {% set sle_version_path = '15-SP2' %} 7 | {% elif grains['osrelease'] == '15.3' %} 8 | {% set sle_version_path = '15-SP3' %} 9 | {% elif grains['osrelease'] == '15.4' %} 10 | {% set sle_version_path = '15-SP4' %} 11 | {% elif grains['osrelease'] == '15.5' %} 12 | {% set sle_version_path = '15-SP5' %} 13 | {% elif grains['osrelease'] == '15.6' %} 14 | {% set sle_version_path = '15-SP6' %} 15 | {% endif %} 16 | 17 | module_server_applications_pool_repo: 18 | pkgrepo.managed: 19 | - baseurl: http://{{ grains.get("mirror") | default("dist.nue.suse.com/ibs", true) }}/SUSE/Products/SLE-Module-Server-Applications/{{ sle_version_path }}/x86_64/product/ 20 | - refresh: True 21 | 22 | module_server_applications_update_repo: 23 | pkgrepo.managed: 24 | - baseurl: http://{{ grains.get("mirror") | default("dist.nue.suse.com/ibs", true) }}/SUSE/Updates/SLE-Module-Server-Applications/{{ sle_version_path }}/x86_64/update/ 25 | - refresh: True 26 | 27 | {% endif %} 28 | 29 | {% endif %} 30 | 31 | # WORKAROUND: see github:saltstack/salt#10852 32 | {{ sls }}_nop: 33 | test.nop: [] 34 | -------------------------------------------------------------------------------- /salt/salt_testenv/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - default 3 | {% if grains['osfullname'] == 'SLES' and grains['osrelease_info'][0] == 15 %} 4 | - .salt_classic_package 5 | - .salt_bundle_package 6 | {% elif grains['osfullname'] == 'SLES' and grains['osrelease_info'][0] == 12 %} 7 | - .salt_bundle_package 8 | {% elif grains['osfullname'] == 'SL-Micro' %} 9 | - .salt_classic_package 10 | - .salt_bundle_package 11 | {% elif grains['osfullname'] == 'Leap' %} 12 | - .salt_classic_package 13 | - .salt_bundle_package 14 | {% elif grains['osfullname'] == 'openSUSE Tumbleweed' %} 15 | - .salt_classic_package 16 | {% elif grains['os'] == 'Debian' %} 17 | {% if grains['osrelease'] == '10' %} 18 | - .salt_classic_package 19 | - .salt_bundle_package 20 | {% else %} 21 | - .salt_bundle_package 22 | {% endif %} 23 | {% elif grains['osfullname'] == 'Ubuntu' %} 24 | {% if grains['osrelease'] == '20.04' %} 25 | - .salt_classic_package 26 | - .salt_bundle_package 27 | {% else %} 28 | - .salt_bundle_package 29 | {% endif %} 30 | {% elif grains['osfullname'] == 'AlmaLinux' %} 31 | {% if grains['osrelease_info'][0] == 8 %} 32 | - .salt_classic_package 33 | - .salt_bundle_package 34 | {% else %} 35 | - .salt_bundle_package 36 | {% endif %} 37 | {% elif grains['osfullname'] == 'CentOS Linux' %} 38 | - .salt_bundle_package 39 | {% else %} 40 | {{ raise("Salt Shaker unsupported OS") }} 41 | {% endif %} 42 | - .postinstallation 43 | -------------------------------------------------------------------------------- /salt/salt_testenv/postinstallation.sls: -------------------------------------------------------------------------------- 1 | {% if grains['os_family'] == 'Suse' and grains['osfullname'] == 'SL-Micro' %} 2 | copy_salt_classic_testsuite: 3 | cmd.run: 4 | - name: transactional-update -c run cp -r /usr/lib/python3.{{ grains["pythonversion"][1] }}/site-packages/salt-testsuite /opt/salt-testsuite-classic 5 | 6 | copy_salt_bundle_testsuite: 7 | cmd.run: 8 | - name: transactional-update -c run cp -r /usr/lib/venv-salt-minion/lib/python3.{{ grains["pythonversion"][1] }}/site-packages/salt-testsuite /opt/salt-testsuite-bundle 9 | 10 | disable_rebootmgr_to_avoid_reboots: 11 | file.managed: 12 | - name: /etc/rebootmgr.conf 13 | - contents: | 14 | [rebootmgr] 15 | 
strategy=off 16 | 17 | reboot_transactional_system: 18 | module.run: 19 | - name: system.reboot 20 | - at_time: +1 21 | - order: last 22 | {% endif %} 23 | 24 | # WORKAROUND: see github:saltstack/salt#10852 25 | {{ sls }}_nop: 26 | test.nop: [] 27 | -------------------------------------------------------------------------------- /salt/scc/build_host.sls: -------------------------------------------------------------------------------- 1 | {% if 'build_host' in grains.get('roles') and grains.get('sles_registration_code') %} 2 | 3 | register_sles_server: 4 | cmd.run: 5 | - name: SUSEConnect --url https://scc.suse.com -r {{ grains.get("sles_registration_code") }} -p SLES/{{ grains['osrelease'] }}/x86_64 6 | 7 | basesystem_activation: 8 | cmd.run: 9 | - name: SUSEConnect -p sle-module-basesystem/{{ grains['osrelease'] }}/x86_64 10 | 11 | containers_activation: 12 | cmd.run: 13 | - name: SUSEConnect -p sle-module-containers/{{ grains['osrelease'] }}/x86_64 14 | 15 | desktop_activation: 16 | cmd.run: 17 | - name: SUSEConnect -p sle-module-desktop-applications/{{ grains['osrelease'] }}/x86_64 18 | 19 | devel_activation: 20 | cmd.run: 21 | - name: SUSEConnect -p sle-module-development-tools/{{ grains['osrelease'] }}/x86_64 22 | 23 | {% endif %} 24 | -------------------------------------------------------------------------------- /salt/scc/clean.sls: -------------------------------------------------------------------------------- 1 | clean_sles_release_package: 2 | cmd.run: 3 | - name: rpm -e --nodeps sles-release; exit 0 4 | 5 | clean_suseconnect_registration: 6 | cmd.run: 7 | - name: SUSEConnect --cleanup 8 | -------------------------------------------------------------------------------- /salt/scc/client.sls: -------------------------------------------------------------------------------- 1 | {% if 'client' in grains.get('roles') and grains.get('sles_registration_code') %} 2 | 3 | register_sles_server: 4 | cmd.run: 5 | - name: SUSEConnect --url https://scc.suse.com -r {{ grains.get("sles_registration_code") }} -p SLES/{{ grains['osrelease'] }}/x86_64 6 | 7 | {% if '15' in grains['osrelease'] %} 8 | 9 | basesystem_activation: 10 | cmd.run: 11 | - name: SUSEConnect -p sle-module-basesystem/{{ grains['osrelease'] }}/x86_64 12 | 13 | containers_activation: 14 | cmd.run: 15 | - name: SUSEConnect -p sle-module-containers/{{ grains['osrelease'] }}/x86_64 16 | 17 | desktop_activation: 18 | cmd.run: 19 | - name: SUSEConnect -p sle-module-desktop-applications/{{ grains['osrelease'] }}/x86_64 20 | 21 | devel_activation: 22 | cmd.run: 23 | - name: SUSEConnect -p sle-module-development-tools/{{ grains['osrelease'] }}/x86_64 24 | 25 | {% endif %} 26 | {% endif %} 27 | -------------------------------------------------------------------------------- /salt/scc/init.sls: -------------------------------------------------------------------------------- 1 | {% if (grains.get('server_registration_code') or grains.get('proxy_registration_code') or grains.get('sles_registration_code')) and grains['os'] == 'SUSE' %} 2 | include: 3 | - scc.clean 4 | - scc.client 5 | - scc.minion 6 | - scc.build_host 7 | - scc.proxy 8 | - scc.server 9 | 10 | scc_refresh_repos: 11 | cmd.run: 12 | - name: zypper --non-interactive --gpg-auto-import-keys refresh --force; exit 0 13 | {% endif %} 14 | -------------------------------------------------------------------------------- /salt/scc/minion.sls: -------------------------------------------------------------------------------- 1 | {% if ( 'minion' in grains.get('roles') or 'sshminion' in
grains.get('roles') ) and grains.get('sles_registration_code') %} 2 | 3 | register_sles_server: 4 | cmd.run: 5 | - name: SUSEConnect --url https://scc.suse.com -r {{ grains.get("sles_registration_code") }} -p SLES/{{ grains['osrelease'] }}/x86_64 6 | 7 | {% if '15' in grains['osrelease'] %} 8 | 9 | basesystem_activation: 10 | cmd.run: 11 | - name: SUSEConnect -p sle-module-basesystem/{{ grains['osrelease'] }}/x86_64 12 | {% endif %} 13 | 14 | {% endif %} 15 | -------------------------------------------------------------------------------- /salt/scc/proxy.sls: -------------------------------------------------------------------------------- 1 | {% if 'proxy' in grains.get('roles') and grains.get('proxy_registration_code') %} 2 | 3 | {% if '4.3' in grains['product_version'] and not 'proxy_containerized' in grains.get('roles') %} 4 | register_suse_manager_proxy_with_scc: 5 | cmd.run: 6 | - name: SUSEConnect --url https://scc.suse.com -r {{ grains.get("proxy_registration_code") }} -p SUSE-Manager-Proxy/4.3/x86_64 7 | add_sle_module_basesystem: 8 | cmd.run: 9 | - name: SUSEConnect -p sle-module-basesystem/15.4/x86_64 10 | add_sle_module_server_application: 11 | cmd.run: 12 | - name: SUSEConnect -p sle-module-server-applications/15.4/x86_64 13 | add_sle_module_suse_manager_proxy: 14 | cmd.run: 15 | - name: SUSEConnect -p sle-module-suse-manager-proxy/4.3/x86_64 16 | add_sle_module_suse_container: 17 | cmd.run: 18 | - name: SUSEConnect -p sle-module-containers/15.4/x86_64 19 | {% endif %} 20 | 21 | {% endif %} 22 | -------------------------------------------------------------------------------- /salt/scc/server.sls: -------------------------------------------------------------------------------- 1 | {% if 'server' in grains.get('roles') and grains.get('server_registration_code') %} 2 | 3 | {% if '4.3' in grains['product_version'] %} 4 | register_suse_manager_server_with_scc: 5 | cmd.run: 6 | - name: SUSEConnect --url https://scc.suse.com -r {{ grains.get("server_registration_code") }} -p SUSE-Manager-Server/4.3/x86_64 7 | add_sle_module_basesystem: 8 | cmd.run: 9 | - name: SUSEConnect -p sle-module-basesystem/15.4/x86_64 10 | add_sle_module_server_application: 11 | cmd.run: 12 | - name: SUSEConnect -p sle-module-server-applications/15.4/x86_64 13 | add_sle_module_web_scripting: 14 | cmd.run: 15 | - name: SUSEConnect -p sle-module-web-scripting/15.4/x86_64 16 | add_sle_module_suse_manager_server: 17 | cmd.run: 18 | - name: SUSEConnect -p sle-module-suse-manager-server/4.3/x86_64 19 | {% endif %} 20 | 21 | {% endif %} 22 | -------------------------------------------------------------------------------- /salt/server/aws.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIEBjCCAu6gAwIBAgIJAMc0ZzaSUK51MA0GCSqGSIb3DQEBCwUAMIGPMQswCQYD 3 | VQQGEwJVUzEQMA4GA1UEBwwHU2VhdHRsZTETMBEGA1UECAwKV2FzaGluZ3RvbjEi 4 | MCAGA1UECgwZQW1hem9uIFdlYiBTZXJ2aWNlcywgSW5jLjETMBEGA1UECwwKQW1h 5 | em9uIFJEUzEgMB4GA1UEAwwXQW1hem9uIFJEUyBSb290IDIwMTkgQ0EwHhcNMTkw 6 | ODIyMTcwODUwWhcNMjQwODIyMTcwODUwWjCBjzELMAkGA1UEBhMCVVMxEDAOBgNV 7 | BAcMB1NlYXR0bGUxEzARBgNVBAgMCldhc2hpbmd0b24xIjAgBgNVBAoMGUFtYXpv 8 | biBXZWIgU2VydmljZXMsIEluYy4xEzARBgNVBAsMCkFtYXpvbiBSRFMxIDAeBgNV 9 | BAMMF0FtYXpvbiBSRFMgUm9vdCAyMDE5IENBMIIBIjANBgkqhkiG9w0BAQEFAAOC 10 | AQ8AMIIBCgKCAQEArXnF/E6/Qh+ku3hQTSKPMhQQlCpoWvnIthzX6MK3p5a0eXKZ 11 | oWIjYcNNG6UwJjp4fUXl6glp53Jobn+tWNX88dNH2n8DVbppSwScVE2LpuL+94vY 12 | 0EYE/XxN7svKea8YvlrqkUBKyxLxTjh+U/KrGOaHxz9v0l6ZNlDbuaZw3qIWdD/I 13 | 
6aNbGeRUVtpM6P+bWIoxVl/caQylQS6CEYUk+CpVyJSkopwJlzXT07tMoDL5WgX9 14 | O08KVgDNz9qP/IGtAcRduRcNioH3E9v981QO1zt/Gpb2f8NqAjUUCUZzOnij6mx9 15 | McZ+9cWX88CRzR0vQODWuZscgI08NvM69Fn2SQIDAQABo2MwYTAOBgNVHQ8BAf8E 16 | BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUc19g2LzLA5j0Kxc0LjZa 17 | pmD/vB8wHwYDVR0jBBgwFoAUc19g2LzLA5j0Kxc0LjZapmD/vB8wDQYJKoZIhvcN 18 | AQELBQADggEBAHAG7WTmyjzPRIM85rVj+fWHsLIvqpw6DObIjMWokpliCeMINZFV 19 | ynfgBKsf1ExwbvJNzYFXW6dihnguDG9VMPpi2up/ctQTN8tm9nDKOy08uNZoofMc 20 | NUZxKCEkVKZv+IL4oHoeayt8egtv3ujJM6V14AstMQ6SwvwvA93EP/Ug2e4WAXHu 21 | cbI1NAbUgVDqp+DRdfvZkgYKryjTWd/0+1fS8X1bBZVWzl7eirNVnHbSH2ZDpNuY 22 | 0SBd8dj5F6ld3t58ydZbrTHze7JJOd8ijySAp4/kiu9UfZWuTPABzDa/DSdz9Dk/ 23 | zPW4CXXvhLmE02TA9/HeCw3KEHIwicNuEfw= 24 | -----END CERTIFICATE----- 25 | -------------------------------------------------------------------------------- /salt/server/download_ubuntu_repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | DIR=$1/htdocs/pub/$2 6 | mkdir -p $DIR 7 | cd $DIR 8 | wget -r -np -A deb,dsc,tar.xz,tar.gz,gz,key,gpg,Packages,Release,Sources http://$3 9 | mv $3/* . 10 | HOST=$(echo $3 | awk -F/ '{print $1}') 11 | if [ -n "$HOST" -a x"$HOST" != "x/" ]; then 12 | rm -rf "$HOST" 13 | fi 14 | 15 | -------------------------------------------------------------------------------- /salt/server/firewall.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - default 3 | 4 | firewall: 5 | pkg.installed: 6 | {% if grains.get('osmajorrelease', None)|int() == 15 %} 7 | - name: firewalld 8 | {% else %} 9 | - name: SuSEfirewall2 10 | {% endif %} 11 | - require: 12 | - sls: default 13 | 14 | {% if not grains.get('disable_firewall') | default(true, true) %} 15 | 16 | firewall_configuration: 17 | {% if grains.get('osmajorrelease', None)|int() == 15 %} 18 | file.managed: 19 | - name: /etc/firewalld/zones/public.xml 20 | - source: salt://server/firewalld_public.xml 21 | {% else %} 22 | file.replace: 23 | - name: /etc/sysconfig/SuSEfirewall2 24 | - pattern: | 25 | ^FW_SERVICES_EXT_TCP 26 | - repl: | 27 | FW_SERVICES_EXT_TCP="http https ssh xmpp-client xmpp-server tftp 1521 5432 4505 4506" 28 | - append_if_not_found: True 29 | - require: 30 | - pkg: firewall 31 | 32 | {% endif %} 33 | {% endif %} 34 | -------------------------------------------------------------------------------- /salt/server/firewalld_public.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="utf-8"?> 2 | <zone> 3 | <short>Public</short> 4 | <service name="http"/> 5 | <service name="https"/> 6 | <service name="ssh"/> 7 | <service name="xmpp-client"/> 8 | <service name="xmpp-server"/> 9 | <service name="tftp"/> 10 | <port protocol="tcp" port="1521"/> 11 | <port protocol="tcp" port="5432"/> 12 | <port protocol="tcp" port="4505"/> 13 | <port protocol="tcp" port="4506"/> 14 | </zone> 15 | -------------------------------------------------------------------------------- /salt/server/iss.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - server.initial_content 3 | 4 | {% if grains['iss_slave'] %} 5 | 6 | register_slave: 7 | cmd.script: 8 | - name: salt://server/register_slave.py 9 | - template: jinja 10 | - args: "{{ grains.get('server_username') | default('admin', true) }} {{ grains.get('server_password') | default('admin', true) }} {{ grains.get('fqdn') | default('localhost', true) }} {{ grains['iss_slave'] }}" 11 | - require: 12 | - sls: server.initial_content 13 | 14 | {% elif grains['iss_master'] %} 15 | 16 | register_master: 17 | cmd.script: 18 | - name: salt://server/register_master.py 19 | - template: jinja 20 | - args: "{{ grains.get('server_username') | default('admin', true) }} {{ grains.get('server_password') | default('admin', true) }} {{
grains['iss_master'] }} {{ grains.get('fqdn') | default('localhost', true) }}" 21 | - require: 22 | - sls: server.initial_content 23 | 24 | master_ssl_cert: 25 | file.managed: 26 | - name: /usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT 27 | - source: http://{{grains['iss_master']}}/pub/RHN-ORG-TRUSTED-SSL-CERT 28 | - source_hash: http://{{grains['iss_master']}}/pub/RHN-ORG-TRUSTED-SSL-CERT.sha512 29 | - require: 30 | - sls: server.initial_content 31 | 32 | {% endif %} 33 | -------------------------------------------------------------------------------- /salt/server/java_agent.yaml: -------------------------------------------------------------------------------- 1 | whitelistObjectNames: 2 | - java.lang:type=Threading,* 3 | - java.lang:type=Memory,* 4 | - Catalina:type=ThreadPool,name=* 5 | rules: 6 | - pattern: ".*" 7 | -------------------------------------------------------------------------------- /salt/server/large_deployment_tune_tomcat.xslt: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="utf-8"?> 2 | <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> 3 | <xsl:output method="xml" indent="yes"/> 4 | <xsl:template match="Connector"> 5 | <xsl:copy> 6 | <xsl:apply-templates select="@*"/> 7 | <xsl:attribute name="maxThreads"> 8 | 256 9 | </xsl:attribute> 10 | <xsl:apply-templates select="node()"/> 11 | </xsl:copy> 12 | </xsl:template> 13 | <xsl:template match="@*|node()"> 14 | <xsl:copy> 15 | <xsl:apply-templates select="@*|node()"/> 16 | </xsl:copy> 17 | </xsl:template> 18 | </xsl:stylesheet> -------------------------------------------------------------------------------- /salt/server/master-custom.conf: -------------------------------------------------------------------------------- 1 | {%- if grains.get('auto_accept') %} 2 | auto_accept: True 3 | {% endif %} 4 | ssh_minion_opts: 5 | log_file: ../../../../../var/log/salt-ssh.log 6 | log_level: debug 7 | -------------------------------------------------------------------------------- /salt/server/postgres-exporter: -------------------------------------------------------------------------------- 1 | ## Path: Applications/PostgreSQLExporter 2 | ## Description: Prometheus exporter for PostgreSQL 3 | ## Type: string() 4 | ## Default: "postgresql://user:passwd@localhost:5432/database?sslmode=disable" 5 | ## ServiceRestart: postgres-exporter 6 | # 7 | # Connection URL to postgresql instance 8 | # 9 | DATA_SOURCE_NAME="postgresql://spacewalk:spacewalk@localhost:5432/susemanager?sslmode=disable" 10 | 11 | ## Path: Applications/PostgreSQLExporter 12 | ## Description: Prometheus exporter for PostgreSQL 13 | ## Type: string() 14 | ## Default: "" 15 | ## ServiceRestart: postgres-exporter 16 | # 17 | # Extra options for postgres-exporter 18 | # 19 | POSTGRES_EXPORTER_PARAMS="--extend.query-path /etc/postgres_exporter/postgres_exporter_queries.yaml" 20 | -------------------------------------------------------------------------------- /salt/server/postgres.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - server 3 | 4 | postgresql_main_configuration: 5 | file.append: 6 | - name: /var/lib/pgsql/data/postgresql.conf 7 | - text: 8 | {% if grains.get('allow_postgres_connections') %} 9 | - listen_addresses = '*' 10 | {% endif %} 11 | {% if grains.get('unsafe_postgres') %} 12 | - fsync = off 13 | - full_page_writes = off 14 | {% endif %} 15 | {% if grains.get('postgres_log_min_duration') is not none %} 16 | - log_min_duration_statement = {{ grains.get('postgres_log_min_duration') }} 17 | {% endif %} 18 | - require: 19 | - sls: server 20 | 21 | {% if grains.get('allow_postgres_connections') %} 22 | postgresql_hba_configuration: 23 | file.append: 24 | - name: /var/lib/pgsql/data/pg_hba.conf 25 | - text: | 26 | {%- if grains['product_version'] in ['head', 'beta', '4.3-nightly', '4.3-pr', '4.3-released', '4.3-VM-nightly', '4.3-VM-released'] %} 27 | host all all 0.0.0.0/0 scram-sha-256 28 | host all
all ::/0 scram-sha-256 29 | {%- else %} 30 | host all all 0.0.0.0/0 md5 31 | host all all ::/0 md5 32 | {%- endif %} 33 | - require: 34 | - sls: server 35 | {% endif %} 36 | 37 | postgresql: 38 | service.running: 39 | - watch: 40 | - file: postgresql_main_configuration 41 | {% if grains.get('allow_postgres_connections') %} 42 | - file: postgresql_hba_configuration 43 | {% endif %} 44 | -------------------------------------------------------------------------------- /salt/server/postgres_exporter_queries.yaml: -------------------------------------------------------------------------------- 1 | mgr_serveractions: 2 | query: | 3 | SELECT ( 4 | SELECT COUNT(*) 5 | FROM rhnServerAction 6 | WHERE status = ( 7 | SELECT id FROM rhnActionStatus WHERE name = 'Queued' 8 | ) 9 | ) AS queued, 10 | ( 11 | SELECT COUNT(*) 12 | FROM rhnServerAction 13 | WHERE status = ( 14 | SELECT id FROM rhnActionStatus WHERE name = 'Picked Up' 15 | ) 16 | ) AS picked_up, 17 | ( 18 | SELECT COUNT(*) 19 | FROM rhnServerAction 20 | WHERE status = ( 21 | SELECT id FROM rhnActionStatus WHERE name IN ('Completed') 22 | ) 23 | ) AS completed, 24 | ( 25 | SELECT COUNT(*) 26 | FROM rhnServerAction 27 | WHERE status = ( 28 | SELECT id FROM rhnActionStatus WHERE name IN ('Failed') 29 | ) 30 | ) AS failed; 31 | metrics: 32 | - queued: 33 | usage: "GAUGE" 34 | description: "Count of queued Actions" 35 | - picked_up: 36 | usage: "GAUGE" 37 | description: "Count of picked up Actions" 38 | - completed: 39 | usage: "COUNTER" 40 | description: "Count of completed Actions" 41 | - failed: 42 | usage: "COUNTER" 43 | description: "Count of failed Actions" 44 | salt_events: 45 | query: | 46 | SELECT COUNT(*) AS salt_events_count 47 | FROM suseSaltEvent; 48 | 49 | metrics: 50 | - salt_events_count: 51 | usage: "GAUGE" 52 | description: "Count of suse salt events" 53 | -------------------------------------------------------------------------------- /salt/server/register_master.py: -------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import sys 4 | import time 5 | try: 6 | # Python 2 7 | from urllib2 import urlopen, HTTPError 8 | from xmlrpclib import Server 9 | except ImportError: 10 | # Python 3 11 | from urllib.request import urlopen 12 | from urllib.error import HTTPError 13 | from xmlrpc.client import ServerProxy as Server 14 | 15 | 16 | if len(sys.argv) != 5: 17 | print("Usage: register_master.py <user> <password> <master_fqdn> <slave_fqdn>") 18 | sys.exit(1) 19 | 20 | MANAGER_URL = "http://{}/rpc/api".format(sys.argv[4]) 21 | 22 | # ensure Tomcat is up 23 | for _ in range(10): 24 | try: 25 | urlopen(MANAGER_URL) 26 | break 27 | except HTTPError: 28 | time.sleep(3) 29 | 30 | client = Server(MANAGER_URL, verbose=0) 31 | 32 | session_key = client.auth.login(sys.argv[1], sys.argv[2]) 33 | 34 | try: 35 | previous_master = client.sync.master.getMasterByLabel(session_key, sys.argv[3]) 36 | client.sync.master.delete(session_key, previous_master["id"]) 37 | print("Pre-existing Master deleted.") 38 | except Exception: 39 | pass 40 | 41 | master = client.sync.master.create(session_key, sys.argv[3]) 42 | 43 | print("Master added to this Slave.") 44 | 45 | result = client.sync.master.makeDefault(session_key, master["id"]) 46 | if result != 1: 47 | print("Got error %d on makeDefault" % result) 48 | sys.exit(1) 49 | 50 | print("Master made default.") 51 | 52 | result = client.sync.master.setCaCert(session_key, master["id"], "/usr/share/rhn/RHN-ORG-TRUSTED-SSL-CERT") 53 | if result != 1: 54 | print("Got error %d on setCaCert" %
result) 55 | sys.exit(1) 56 | 57 | print("CA cert path set.") 58 | 59 | print("Done.") 60 | -------------------------------------------------------------------------------- /salt/server/register_slave.py: -------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import sys 4 | import time 5 | try: 6 | # Python 2 7 | from urllib2 import urlopen, HTTPError 8 | from xmlrpclib import Server, ProtocolError 9 | except ImportError: 10 | # Python 3 11 | from urllib.request import urlopen 12 | from urllib.error import HTTPError 13 | from xmlrpc.client import ServerProxy as Server, ProtocolError 14 | 15 | if len(sys.argv) != 5: 16 | print("Usage: register_slave.py <user> <password> <master_fqdn> <slave_fqdn>") 17 | sys.exit(1) 18 | 19 | MANAGER_URL = "http://{}/rpc/api".format(sys.argv[3]) 20 | 21 | # ensure Tomcat is up 22 | for _ in range(10): 23 | try: 24 | urlopen(MANAGER_URL) 25 | break 26 | except HTTPError: 27 | time.sleep(3) 28 | 29 | client = Server(MANAGER_URL, verbose=0) 30 | 31 | session_key = None 32 | attempts = 10 33 | while session_key is None and attempts > 0: 34 | try: 35 | session_key = client.auth.login(sys.argv[1], sys.argv[2]) 36 | except ProtocolError: 37 | time.sleep(3) 38 | attempts -= 1 39 | 40 | try: 41 | previous_slave = client.sync.slave.getSlaveByName(session_key, sys.argv[4]) 42 | client.sync.slave.delete(session_key, previous_slave["id"]) 43 | print("Pre-existing Slave deleted.") 44 | except Exception: 45 | pass 46 | 47 | slave = client.sync.slave.create(session_key, sys.argv[4], True, True) 48 | 49 | print("Slave added to this Master.") 50 | 51 | orgs = client.org.listOrgs(session_key) 52 | result = client.sync.slave.setAllowedOrgs(session_key, slave["id"], [org["id"] for org in orgs]) 53 | if result != 1: 54 | print("Got error %d on setAllowedOrgs" % result) 55 | sys.exit(1) 56 | 57 | print("All orgs exported.") 58 | 59 | print("Done.") 60 | -------------------------------------------------------------------------------- /salt/server/salt_master.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - server 3 | 4 | custom_salt_master_configuration: 5 | file.managed: 6 | - name: /etc/salt/master.d/zz-custom.conf 7 | - source: salt://server/master-custom.conf 8 | - template: jinja 9 | - require: 10 | - sls: server 11 | 12 | salt_master: 13 | service.running: 14 | - name: salt-master 15 | - enable: True 16 | - watch: 17 | - file: custom_salt_master_configuration 18 | 19 | salt_api: 20 | service.running: 21 | - name: salt-api 22 | - enable: True 23 | - watch: 24 | - file: custom_salt_master_configuration 25 | -------------------------------------------------------------------------------- /salt/server/setup_env.sh: -------------------------------------------------------------------------------- 1 | MANAGER_USER="spacewalk" 2 | MANAGER_PASS="spacewalk" 3 | MANAGER_ADMIN_EMAIL="galaxy-noise@suse.de" 4 | CERT_O="SUSE" 5 | CERT_OU="SUSE" 6 | CERT_CITY="Nuernberg" 7 | CERT_STATE="Bayern" 8 | CERT_COUNTRY="DE" 9 | CERT_EMAIL="galaxy-noise@suse.de" 10 | CERT_PASS="spacewalk" 11 | {%- if grains.get('provider') == 'aws' %} 12 | CERT_CNAMES="{{ grains.get('hostname') }}.{{ grains.get('domain') }}" 13 | {%- endif %} 14 | USE_EXISTING_CERTS="N" 15 | MANAGER_DB_NAME="susemanager" 16 | MANAGER_DB_HOST="{{ grains.get('db_configuration')['hostname'] }}" 17 | MANAGER_DB_PORT="{{ grains.get('db_configuration')['port'] }}" 18 | MANAGER_DB_PROTOCOL="TCP" 19 | MANAGER_ENABLE_TFTP="Y" 20 | {%- if grains.get('cc_username') %} 21 | SCC_USER="{{
grains.get("cc_username") }}" 22 | SCC_PASS="{{ grains.get("cc_password") }}" 23 | {% endif %} 24 | {%- if not grains.get('db_configuration')['local'] %} 25 | EXTERNALDB_ADMIN_USER="{{ grains.get('db_configuration')['superuser'] }}" 26 | EXTERNALDB_ADMIN_PASS="{{ grains.get('db_configuration')['superuser_password'] }}" 27 | REPORT_DB_HOST="{{ grains.get('db_configuration')['hostname'] }}" 28 | EXTERNALDB_PROVIDER="{{ grains.get('provider') }}" 29 | REPORT_DB_PORT="{{ grains.get('db_configuration')['port'] }}" 30 | REPORT_DB_NAME="reportdb" 31 | REPORT_DB_USER="pythia_susemanager" 32 | REPORT_DB_PASS="pythia_susemanager" 33 | MANAGER_DB_CA_CERT="{{ grains.get('db_configuration')['certificate'] }}" 34 | REPORT_DB_CA_CERT="{{ grains.get('db_configuration')['certificate'] }}" 35 | {% endif %} 36 | -------------------------------------------------------------------------------- /salt/server/spacewalk-search.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('java_debugging') %} 2 | 3 | include: 4 | - server 5 | 6 | spacewalk_search_config: 7 | file.append: 8 | - name: /usr/share/rhn/config-defaults/rhn_search_daemon.conf 9 | - text: | 10 | JAVA_OPTS='-Xdebug -Xrunjdwp:transport=dt_socket,address=*:8002,server=y,suspend=n' 11 | - require: 12 | - sls: server 13 | 14 | spacewalk-service: 15 | service.running: 16 | - name: spacewalk.target 17 | - watch: 18 | - file: spacewalk_search_config 19 | - require: 20 | - file: spacewalk_search_config 21 | 22 | {% endif %} 23 | -------------------------------------------------------------------------------- /salt/server/taskomatic.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('java_debugging') or grains.get('java_hibernate_debugging') or grains.get('scc_access_logging') %} 2 | include: 3 | - server.rhn 4 | {% endif %} 5 | 6 | {% if grains.get('java_debugging') %} 7 | taskomatic_config: 8 | file.replace: 9 | - name: /etc/rhn/taskomatic.conf 10 | - pattern: JAVA_OPTS="" 11 | - repl: JAVA_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,address=*:8001,server=y,suspend=n " 12 | - require: 13 | - sls: server.rhn 14 | {% endif %} 15 | 16 | {% if grains.get('java_hibernate_debugging') %} 17 | hibernate_debug_log: 18 | file.line: 19 | - name: /srv/tomcat/webapps/rhn/WEB-INF/classes/log4j2.xml 20 | - content: ' ' 21 | - after: "" 22 | - mode: ensure 23 | - require: 24 | - sls: server.rhn 25 | 26 | taskomatic_hibernate_debug_log: 27 | file.line: 28 | - name: /srv/tomcat/webapps/rhn/WEB-INF/classes/log4j2.xml 29 | - content: ' ' 30 | - after: "" 31 | - mode: ensure 32 | - require: 33 | - sls: server.rhn 34 | {% endif %} 35 | 36 | {% if grains.get('scc_access_logging') %} 37 | taskomatic_scc_access_logging: 38 | file.line: 39 | - name: /usr/share/rhn/classes/log4j2.xml 40 | - content: '' 41 | - before: "" 42 | - mode: ensure 43 | - indent: True 44 | - require: 45 | - sls: server.rhn 46 | {% endif %} 47 | 48 | taskomatic: 49 | service.running: 50 | - watch: 51 | - file: /etc/rhn/rhn.conf 52 | {% if grains.get('monitored') | default(false, true) %} 53 | - file: jmx_taskomatic_config 54 | {% endif %} 55 | -------------------------------------------------------------------------------- /salt/server/taskomatic_jmx.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="JAVA_AGENT=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5557:/etc/prometheus-jmx_exporter/taskomatic/java_agent.yml" 3 | 
-------------------------------------------------------------------------------- /salt/server/tcpdump.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | After=network.target 3 | 4 | [Service] 5 | Type=simple 6 | Restart=always 7 | RestartSec=30 8 | ExecStart=/bin/bash -c "/usr/sbin/tcpdump -ilo -n -v src localhost and tcp port 9080 -w /tmp/tcpdump-$$(date +%%s)_suma-head-srv.pcap -C 100 -W 10" 9 | ExecStop=/bin/kill -s QUIT $MAINPID 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | 14 | -------------------------------------------------------------------------------- /salt/server/tcpdump.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('saltapi_tcpdump', false) %} 2 | 3 | tcpdump: 4 | pkg.installed 5 | 6 | tcpdump.unit: 7 | file.managed: 8 | - name: /etc/systemd/system/tcpdump.service 9 | - require: 10 | - pkg: tcpdump 11 | - source: salt://server/tcpdump.service 12 | - user: root 13 | - group: root 14 | 15 | tcpdump.service: 16 | service.running: 17 | - require: 18 | - file: tcpdump.unit 19 | - enable: True 20 | 21 | {% endif %} 22 | -------------------------------------------------------------------------------- /salt/server/tomcat_jmx.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="CATALINA_OPTS=-javaagent:/usr/share/java/jmx_prometheus_javaagent.jar=5556:/etc/prometheus-jmx_exporter/tomcat/java_agent.yml" 3 | -------------------------------------------------------------------------------- /salt/server/wait_for_mgr_sync.py: -------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import time 4 | 5 | from spacewalk.server import rhnSQL 6 | 7 | rhnSQL.initDB() 8 | 9 | task_query = rhnSQL.prepare(""" 10 | SELECT COUNT(DISTINCT task.name) AS count 11 | FROM rhnTaskoRun run 12 | JOIN rhnTaskoTemplate template ON template.id = run.template_id 13 | JOIN rhnTaskoBunch bunch ON bunch.id = template.bunch_id 14 | JOIN rhnTaskoTask task ON task.id = template.task_id 15 | WHERE bunch.name = 'mgr-sync-refresh-bunch' AND run.end_time IS NOT NULL 16 | """) 17 | 18 | 19 | print("Waiting for mgr-sync refresh to finish...") 20 | 21 | while True: 22 | task_query.execute() 23 | 24 | if task_query.fetchone_dict()['count'] > 0: 25 | break 26 | 27 | print("...not finished yet...") 28 | time.sleep(10) 29 | 30 | print("Done.") 31 | -------------------------------------------------------------------------------- /salt/server/wait_for_reposync.py: -------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import os 4 | import sys 5 | import time 6 | 7 | try: 8 | # Python 2 9 | from urllib2 import urlopen, HTTPError 10 | from xmlrpclib import Server 11 | except ImportError: 12 | # Python 3 13 | from urllib.request import urlopen 14 | from urllib.error import HTTPError 15 | from xmlrpc.client import ServerProxy as Server 16 | 17 | if len(sys.argv) != 5: 18 | print("Usage: wait_for_reposync.py <username> <password> <fqdn> <channel>") 19 | sys.exit(1) 20 | 21 | _, username, password, fqdn, channel = sys.argv 22 | 23 | MANAGER_URL = "http://{}/rpc/api".format(fqdn) 24 | 25 | # ensure Tomcat is up 26 | for _ in range(10): 27 | try: 28 | urlopen(MANAGER_URL) 29 | break 30 | except HTTPError: 31 | time.sleep(3) 32 | 33 | client = Server(MANAGER_URL, verbose=0) 34 | 35 | session_key = client.auth.login(username, password) 36 | 37 | channels
= [c for c in client.channel.listVendorChannels(session_key) if c["label"] == channel] 38 | if not channels: 39 | print("Channel not found.") 40 | sys.exit(1) 41 | 42 | id = channels[0]["id"] 43 | 44 | print("Waiting for reposync to finish...") 45 | 46 | while not os.path.isfile("/var/cache/rhn/repodata/{}/repomd.xml".format(channel)): 47 | print("...not finished yet...") 48 | time.sleep(10) 49 | 50 | print("Done.") 51 | -------------------------------------------------------------------------------- /salt/server_containerized/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | {% if 'build_image' not in grains.get('product_version') | default('', true) %} 3 | - repos 4 | {% endif %} 5 | - server_containerized.additional_disks 6 | - server_containerized.install_mgradm 7 | - server_containerized.initial_content 8 | - server_containerized.rhn 9 | - server_containerized.large_deployment 10 | - server_containerized.testsuite 11 | -------------------------------------------------------------------------------- /salt/server_containerized/install_common.sls: -------------------------------------------------------------------------------- 1 | {%- set mirror_hostname = grains.get('server_mounted_mirror') if grains.get('server_mounted_mirror') else grains.get('mirror') %} 2 | 3 | {% if grains['osfullname'] not in ['SLE Micro', 'SL-Micro', 'openSUSE Leap Micro'] %} 4 | uyuni-tools: 5 | pkg.installed: 6 | - pkgs: 7 | - mgradm 8 | - mgrctl 9 | {%- else %} 10 | check_mgrctl_installed: 11 | cmd.run: 12 | - name: "rpm -q mgrctl" 13 | - success_retcodes: [0] 14 | - failhard: True 15 | 16 | check_mgradm_installed: 17 | cmd.run: 18 | - name: "rpm -q mgradm" 19 | - success_retcodes: [0] 20 | - failhard: True 21 | {% endif %} 22 | 23 | {% if mirror_hostname %} 24 | 25 | nfs_client: 26 | pkg.installed: 27 | - name: nfs-client 28 | 29 | non_empty_fstab: 30 | file.managed: 31 | - name: /etc/fstab 32 | - replace: false 33 | 34 | mirror_directory: 35 | mount.mounted: 36 | - name: /srv/mirror 37 | - device: {{ mirror_hostname }}:/srv/mirror 38 | - fstype: nfs 39 | - mkmnt: True 40 | - require: 41 | - file: /etc/fstab 42 | - pkg: nfs_client 43 | 44 | {% endif %} 45 | 46 | # WORKAROUND: see github:saltstack/salt#10852 47 | {{ sls }}_nop: 48 | test.nop: [] 49 | -------------------------------------------------------------------------------- /salt/server_containerized/install_k3s.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - server_containerized.install_common 3 | 4 | {% if grains['osfullname'] not in ['SLE Micro', 'SL-Micro', 'openSUSE Leap Micro'] %} 5 | k3s_install: 6 | cmd.run: 7 | - name: curl -sfL https://get.k3s.io | sh - 8 | - env: 9 | - INSTALL_K3S_EXEC: "--tls-san={{ grains.get('fqdn') }}" 10 | - unless: systemctl is-active k3s 11 | 12 | wait_for_traefik: 13 | cmd.script: 14 | - name: salt://server_containerized/wait_for_kube_resource.py 15 | - args: kube-system deploy traefik 16 | - use_vt: True 17 | - template: jinja 18 | - require: 19 | - cmd: k3s_install 20 | 21 | helm_install: 22 | pkg.installed: 23 | - refresh: True 24 | - name: helm 25 | {% endif %} 26 | 27 | {%- set mirror_hostname = grains.get('server_mounted_mirror') if grains.get('server_mounted_mirror') else grains.get('mirror') %} 28 | {% if mirror_hostname %} 29 | mirror_pv_file: 30 | file.managed: 31 | - name: /root/mirror-pv.yaml 32 | - source: salt://server_containerized/mirror-pv.yaml 33 | - template: jinja 34 | 35 | 
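# Note on the pair of states around this point (comment only): mirror_pv_file
# renders mirror-pv.yaml (listed further below) into /root, and mirror_pv_deploy
# then applies it so the local-storage PersistentVolume "mirror" exists before
# the Uyuni chart's PVC tries to bind to the NFS-backed /srv/mirror directory
# mounted by install_common.sls; the "unless: kubectl get pv mirror" guard keeps
# a re-run of the highstate idempotent.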
mirror_pv_deploy: 36 | cmd.run: 37 | - name: kubectl apply -f /root/mirror-pv.yaml 38 | - unless: kubectl get pv mirror 39 | - require: 40 | - file: mirror_pv_file 41 | {% endif %} 42 | -------------------------------------------------------------------------------- /salt/server_containerized/install_mgradm.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - server_containerized.install_{{ grains.get('container_runtime') | default('podman', true) }} 3 | 4 | mgradm_config: 5 | file.managed: 6 | - name: /root/mgradm.yaml 7 | - source: salt://server_containerized/mgradm.yaml 8 | - template: jinja 9 | 10 | {% set runtime = grains.get('container_runtime') | default('podman', true) %} 11 | {% set install_cmd = 'kubernetes' if runtime == 'k3s' else 'podman' %} 12 | 13 | mgradm_install: 14 | cmd.run: 15 | - name: mgradm install {{ install_cmd }} --logLevel=debug --config /root/mgradm.yaml {{ grains.get("fqdn") }} 16 | - env: 17 | - KUBECONFIG: /etc/rancher/k3s/k3s.yaml 18 | {%- if grains.get('container_runtime') | default('podman', true) == 'podman' %} 19 | - unless: podman ps | grep uyuni-server 20 | {%- else %} 21 | - unless: helm --kubeconfig /etc/rancher/k3s/k3s.yaml list | grep uyuni 22 | {%- endif %} 23 | - require: 24 | - sls: server_containerized.install_common 25 | - sls: server_containerized.install_{{ grains.get('container_runtime') | default('podman', true) }} 26 | - file: mgradm_config 27 | -------------------------------------------------------------------------------- /salt/server_containerized/install_podman.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - server_containerized.install_common 3 | 4 | {% if grains['osfullname'] not in ['SLE Micro', 'SL-Micro', 'openSUSE Leap Micro'] %} 5 | podman_packages: 6 | pkg.installed: 7 | - pkgs: 8 | - podman 9 | - netavark 10 | - aardvark-dns 11 | - require: 12 | {% if 'build_image' not in grains.get('product_version') | default('', true) %} 13 | - sls: repos 14 | {% endif %} 15 | {% endif %} 16 | 17 | podman_login: 18 | cmd.run: 19 | - name: podman login -u {{ grains.get('cc_username') }} -p {{ grains.get('cc_password') }} {{ grains.get("container_repository") }} 20 | 21 | # WORKAROUND: see github:saltstack/salt#10852 22 | {{ sls }}_nop: 23 | test.nop: [] 24 | -------------------------------------------------------------------------------- /salt/server_containerized/large_deployment_tune_tomcat.xslt: -------------------------------------------------------------------------------- 1 | <?xml version="1.0" encoding="utf-8"?> 2 | <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform"> 3 | <xsl:output method="xml" indent="yes"/> 4 | <xsl:template match="Connector"> 5 | <xsl:copy> 6 | <xsl:apply-templates select="@*"/> 7 | <xsl:attribute name="maxThreads"> 8 | 256 9 | </xsl:attribute> 10 | <xsl:apply-templates select="node()"/> 11 | </xsl:copy> 12 | </xsl:template> 13 | <xsl:template match="@*|node()"> 14 | <xsl:copy> 15 | <xsl:apply-templates select="@*|node()"/> 16 | </xsl:copy> 17 | </xsl:template> 18 | </xsl:stylesheet> -------------------------------------------------------------------------------- /salt/server_containerized/mgradm.yaml: -------------------------------------------------------------------------------- 1 | {% set runtime = grains.get('container_runtime') | default('podman', true) %} 2 | db: 3 | password: spacewalk 4 | ssl: 5 | password: spacewalk 6 | {%- if grains.get('cc_username') %} 7 | scc: 8 | user: {{ grains.get("cc_username") }} 9 | password: {{ grains.get("cc_password") }} 10 | {% endif %} 11 | email: {{ grains.get("traceback_email") | default('galaxy-noise@suse.de', true) }} 12 | emailFrom: {{ grains.get("from_email") | default('galaxy-noise@suse.de', true) }} 13 | {%- if grains.get('container_repository') %} 14 | registry: {{ grains.get('container_repository') }} 15 | {% endif %} 16 | {%- if grains.get('container_image') %} 17 | image: {{
grains.get('container_image') }} 18 | {% endif %} 19 | {%- if grains.get('container_tag') %} 20 | tag: {{ grains.get('container_tag') }} 21 | {% endif %} 22 | {%- set mirror_hostname = grains.get('server_mounted_mirror') if grains.get('server_mounted_mirror') else grains.get('mirror') %} 23 | {%- if runtime == 'podman' %} 24 | {%- if mirror_hostname %} 25 | mirror: /srv/mirror 26 | {%- endif %} 27 | {%- else %} 28 | {%- if mirror_hostname %} 29 | volumes: 30 | mirror: mirror 31 | {%- endif %} 32 | kubernetes: 33 | uyuni: 34 | namespace: uyuni 35 | {%- endif %} 36 | {%- if grains.get("java_debugging") %} 37 | debug: 38 | java: true 39 | {%- endif %} 40 | 41 | {% set server_username = grains.get('server_username') | default('admin', true) %} 42 | {% set server_password = grains.get('server_password') | default('admin', true) %} 43 | organization: SUSE Test 44 | admin: 45 | password: {{ server_password }} 46 | login: {{ server_username }} 47 | firstName: Admin 48 | lastName: Admin 49 | email: galaxy-noise@suse.de 50 | tz: 51 | Europe/Berlin 52 | -------------------------------------------------------------------------------- /salt/server_containerized/mirror-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: mirror 5 | spec: 6 | capacity: 7 | storage: 100Gi 8 | volumeMode: Filesystem 9 | accessModes: 10 | - ReadWriteOnce 11 | persistentVolumeReclaimPolicy: Delete 12 | storageClassName: local-storage 13 | local: 14 | path: /srv/mirror 15 | nodeAffinity: 16 | required: 17 | nodeSelectorTerms: 18 | - matchExpressions: 19 | - key: kubernetes.io/hostname 20 | operator: In 21 | values: 22 | - {{ grains['hostname'] }} 23 | claimRef: 24 | namespace: uyuni 25 | name: mirror 26 | -------------------------------------------------------------------------------- /salt/server_containerized/rhn.sls: -------------------------------------------------------------------------------- 1 | {% if grains.get('skip_changelog_import') %} 2 | 3 | package_import_skip_changelog_reposync: 4 | cmd.run: 5 | - name: mgrctl exec 'echo "package_import_skip_changelog = 1" >> /etc/rhn/rhn.conf' 6 | 7 | {% endif %} 8 | 9 | limit_changelog_entries: 10 | cmd.run: 11 | - name: mgrctl exec 'grep -q "java.max_changelog_entries" /etc/rhn/rhn.conf && sed -i "s/java.max_changelog_entries.*/java.max_changelog_entries = 3/" /etc/rhn/rhn.conf || echo "java.max_changelog_entries = 3" >> /etc/rhn/rhn.conf' 12 | 13 | {% if grains.get('disable_download_tokens') %} 14 | disable_download_tokens: 15 | cmd.run: 16 | - name: mgrctl exec 'echo "java.salt_check_download_tokens = false" >> /etc/rhn/rhn.conf' 17 | {% endif %} 18 | 19 | {% if grains.get('monitored') | default(false, true) %} 20 | 21 | rhn_conf_prometheus: 22 | cmd.run: 23 | - name: mgrctl exec 'echo "prometheus_monitoring_enabled = true" >> /etc/rhn/rhn.conf' 24 | 25 | {% endif %} 26 | 27 | {% if not grains.get('forward_registration') | default(false, true) %} 28 | 29 | rhn_conf_forward_reg: 30 | cmd.run: 31 | - name: mgrctl exec 'echo "server.susemanager.forward_registration = 0" >> /etc/rhn/rhn.conf' 32 | 33 | {% endif %} 34 | 35 | {% if grains.get('disable_auto_bootstrap') | default(false, true) %} 36 | 37 | rhn_conf_disable_auto_generate_bootstrap_repo: 38 | cmd.run: 39 | - name: mgrctl exec 'echo "server.susemanager.auto_generate_bootstrap_repo = 0" >> /etc/rhn/rhn.conf' 40 | 41 | {% endif %} 42 | 43 | {% if 'head' in grains.get('product_version') and 
grains.get('beta_enabled') %} 44 | change_product_tree_to_beta: 45 | cmd.run: 46 | - name: mgrctl exec 'grep -q "java.product_tree_tag" /etc/rhn/rhn.conf && sed -i "s/java.product_tree_tag = .*/java.product_tree_tag = Beta/" /etc/rhn/rhn.conf || echo "java.product_tree_tag = Beta" >> /etc/rhn/rhn.conf' 47 | {% endif %} 48 | 49 | {% if grains.get('testsuite') | default(false, true) %} 50 | increase_presence_ping_timeout: 51 | cmd.run: 52 | - name: mgrctl exec 'echo "java.salt_presence_ping_timeout = 6" >> /etc/rhn/rhn.conf' 53 | {% endif %} 54 | 55 | rhn_conf_present: 56 | cmd.run: 57 | - name: mgrctl exec 'touch /etc/rhn/rhn.conf' 58 | -------------------------------------------------------------------------------- /salt/server_containerized/salt-events.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Log salt events 3 | 4 | [Install] 5 | WantedBy=multi-user.target 6 | 7 | [Service] 8 | ExecStart=sh -c 'salt-run state.event pretty=True >/var/log/rhn/salt-event.log' 9 | -------------------------------------------------------------------------------- /salt/server_containerized/wait_for_kube_resource.py: -------------------------------------------------------------------------------- 1 | #!{{grains['pythonexecutable']}} 2 | 3 | import subprocess 4 | import sys 5 | import time 6 | 7 | 8 | if len(sys.argv) != 4: 9 | print("Usage: wait_for_kube_resource.py <namespace> <kind> <name>") 10 | sys.exit(1) 11 | _, namespace, kind, name = sys.argv 12 | 13 | print("Waiting for {} {} to be ready...".format(name, kind)) 14 | 15 | ready_check = "grep Running" 16 | if kind in ["service", "svc"]: 17 | ready_check = "wc -l | grep 1" 18 | elif kind in ["deployment", "deploy"]: 19 | ready_check = "grep 1/1" 20 | elif kind == "issuer": 21 | ready_check = "grep True" 22 | 23 | cmd = "kubectl get --no-headers -n {} {} {} | {}".format(namespace, kind, name, ready_check) 24 | 25 | for i in range(60): 26 | 27 | process = subprocess.run(cmd, shell=True) 28 | if process.returncode == 0: 29 | break 30 | 31 | print("... not finished yet...") 32 | time.sleep(10) 33 | 34 | print("Done.") 35 | -------------------------------------------------------------------------------- /salt/server_containerized/wait_for_mgr_sync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | USER=$1 4 | PASS=$2 5 | 6 | echo " 7 | SELECT '=== ' || COUNT(DISTINCT task.name) || ' ===' AS count 8 | FROM rhnTaskoRun run 9 | JOIN rhnTaskoTemplate template ON template.id = run.template_id 10 | JOIN rhnTaskoBunch bunch ON bunch.id = template.bunch_id 11 | JOIN rhnTaskoTask task ON task.id = template.task_id 12 | WHERE bunch.name = 'mgr-sync-refresh-bunch' AND run.end_time IS NOT NULL 13 | " > /tmp/sync-refresh-query.sql 14 | 15 | echo "Waiting for mgr-sync refresh to finish..." 16 | 17 | sleep 3 18 | for i in $(seq 100); do 19 | OUT=$(mgradm support sql /tmp/sync-refresh-query.sql) 20 | if echo $OUT | grep "=== " >/dev/null && ! echo $OUT | grep "=== 0 ===" >/dev/null; then 21 | if test -f /root/.mgr-sync ; then 22 | mgrctl exec "mgr-sync refresh" 23 | else 24 | echo -e "$USER\\n$PASS\\n" | mgrctl exec -i "mgr-sync refresh" 25 | fi 26 | if [ $? -eq 0 ]; then 27 | break 28 | fi 29 | fi 30 | echo "...not finished yet..." 31 | sleep 10 32 | done 33 | rm -f /tmp/sync-refresh-query.sql 34 | echo "Done."
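# Unlike the XML-RPC based wait_for_mgr_sync.py used by the non-containerized
# server, this script polls the database through "mgradm support sql" (up to
# 100 attempts, 10 seconds apart) and re-runs "mgr-sync refresh" inside the
# container, feeding the credentials from $1/$2 on stdin when /root/.mgr-sync
# does not exist yet.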
35 | -------------------------------------------------------------------------------- /salt/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | - default 4 | 5 | 'roles:server': 6 | - match: grain 7 | - server 8 | 9 | 'roles:server_containerized': 10 | - match: grain 11 | - server_containerized 12 | 13 | 'roles:client': 14 | - match: grain 15 | - client 16 | 17 | 'roles:proxy': 18 | - match: grain 19 | - proxy 20 | 21 | 'roles:proxy_containerized': 22 | - match: grain 23 | - proxy_containerized 24 | 25 | 'roles:minion': 26 | - match: grain 27 | - minion 28 | 29 | 'roles:mirror': 30 | - match: grain 31 | - mirror 32 | 33 | 'roles:controller': 34 | - match: grain 35 | - controller 36 | 37 | 'roles:grafana': 38 | - match: grain 39 | - grafana 40 | 41 | 'roles:locust': 42 | - match: grain 43 | - locust 44 | 45 | 'roles:virthost': 46 | - match: grain 47 | - virthost 48 | 49 | 'roles:build_host': 50 | - match: grain 51 | - build_host 52 | 53 | 'roles:jenkins': 54 | - match: grain 55 | - jenkins 56 | 57 | 'roles:registry': 58 | - match: grain 59 | - registry 60 | 61 | 'roles:salt_testenv': 62 | - match: grain 63 | - salt_testenv 64 | -------------------------------------------------------------------------------- /salt/virthost/systemd-detect-virt: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "none" 4 | exit 1 5 | -------------------------------------------------------------------------------- /salt/wait_for_salt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -x /usr/bin/cloud-init ]; then 4 | # Wait for cloud-init to finish 5 | NEXT_TRY=0 6 | until [ $NEXT_TRY -eq 50 ] || ! cloud-init status | grep running 7 | do 8 | echo "cloud-init is still running. Retrying... [$NEXT_TRY]"; 9 | sleep 10; 10 | ((NEXT_TRY++)); 11 | done 12 | 13 | if [ $NEXT_TRY -eq 50 ] 14 | then 15 | echo "ERROR: cloud-init is still running after 50 retries"; 16 | exit 1; 17 | fi 18 | fi 19 | 20 | if [ -x /usr/bin/venv-salt-call ]; then 21 | echo "Salt Bundle detected! We use it for running sumaform deployment" 22 | echo "Copying /tmp/grains to /etc/venv-salt-minion/grains" 23 | cp /tmp/grains /etc/venv-salt-minion/grains 24 | SALT_CALL=venv-salt-call 25 | elif [ -x /usr/bin/salt-call ]; then 26 | echo "Classic Salt detected! We use it for running sumaform deployment" 27 | echo "Copying /tmp/grains to /etc/salt/grains" 28 | cp /tmp/grains /etc/salt/grains 29 | SALT_CALL=salt-call 30 | else 31 | echo "Error: Cannot find venv-salt-call or salt-call on the system" 32 | exit 1 33 | fi 34 | 35 | for i in {0..100} 36 | do 37 | if ${SALT_CALL} --help &>/dev/null; then 38 | break 39 | fi 40 | echo "Waiting for salt to be installed..." 41 | sleep 3 42 | done 43 | --------------------------------------------------------------------------------
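wait_for_salt.sh above only guarantees that cloud-init has settled, that the grains file has been copied next to the detected minion flavor, and that a salt-call binary responds; applying the actual states is a separate provisioning step. A minimal sketch of that follow-up call under the same bundle-or-classic detection, assuming masterless operation on the target host — the log path and the choice of state.highstate are illustrative assumptions, not taken from this repository:

```bash
#!/bin/bash
# Hedged sketch: apply the highstate once wait_for_salt.sh has confirmed that
# a Salt binary is available, reusing its bundle-vs-classic detection order.
set -o pipefail
if [ -x /usr/bin/venv-salt-call ]; then
  SALT_CALL=venv-salt-call     # Salt Bundle flavor
else
  SALT_CALL=salt-call          # classic Salt flavor
fi
# --local: masterless mode, states and grains already live on this host
# --retcode-passthrough: propagate a failed state as a non-zero exit code
${SALT_CALL} --local --retcode-passthrough state.highstate \
  | tee /var/log/sumaform-highstate.log
```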