├── .github └── workflows │ ├── ci.yml │ └── trigger-gitlab.yml ├── .gitignore ├── .gitlab-ci.yml ├── .gitlab-ci.yml.swp ├── Dockerfile ├── LICENSE ├── README.md ├── Schutzfile ├── base.Dockerfile ├── ci ├── .gitlab-ci-cloud-experience.yaml ├── .gitlab-ci-image-builder.yaml ├── aws.sh ├── provision.sh ├── set-env-variables.sh └── shared_lib.sh ├── cloud-image-val.py ├── cloud ├── opentofu │ ├── aws_config_builder.py │ ├── aws_config_builder_efs.py │ ├── azure_config_builder.py │ ├── azure_config_builder_v2.py │ ├── base_config_builder.py │ ├── gcloud_config_builder.py │ ├── opentofu_configurator.py │ └── opentofu_controller.py └── sample │ ├── resources_aws_gov_marketplace.json │ ├── resources_aws_marketplace.json │ ├── resources_azure.json │ ├── resources_azure_marketplace.json │ ├── resources_azure_vhd.json │ └── resources_gcloud.json ├── data ├── azure │ ├── 05_logging.cfg │ ├── 06_logging_override.cfg │ ├── 66-azure-storage.rules │ ├── 68-azure-sriov-nm-unmanaged.rules │ ├── 91-azure_datasource.cfg │ ├── 99-azure-product-uuid.rules │ ├── authconfig │ ├── grub_rhel7 │ ├── grub_rhel8 │ ├── grub_rhel9 │ └── grub_rhel9.3+ └── generic │ ├── dnf.conf │ ├── fingerprint-auth_rhel10 │ ├── fingerprint-auth_rhel7 │ ├── fingerprint-auth_rhel8 │ ├── fingerprint-auth_rhel9 │ ├── langpacks.conf │ ├── password-auth_rhel10 │ ├── password-auth_rhel7 │ ├── password-auth_rhel8 │ ├── password-auth_rhel9 │ ├── postlogin_rhel10 │ ├── postlogin_rhel7 │ ├── postlogin_rhel8 │ ├── postlogin_rhel9 │ ├── smartcard-auth_rhel10 │ ├── smartcard-auth_rhel7 │ ├── smartcard-auth_rhel8 │ ├── smartcard-auth_rhel8.10 │ ├── smartcard-auth_rhel9 │ ├── system-auth_rhel10 │ ├── system-auth_rhel7 │ ├── system-auth_rhel8 │ └── system-auth_rhel9 ├── lib ├── aws_lib.py ├── config_lib.py ├── console_lib.py ├── ssh_lib.py └── test_lib.py ├── main ├── civ_report_analyzer.py └── cloud_image_validator.py ├── pytest.ini ├── requirements.txt ├── result └── reporter.py ├── schutzbot ├── append_team_ssh_keys.sh ├── 
define-compose-url.sh ├── deploy.sh ├── get_civ_config.py ├── prepare-rhel-internal.sh ├── team_ssh_keys │ ├── ccowman.pub │ ├── fkolwa.pub │ ├── igulina.pub │ ├── knivnia.pub │ ├── nmunoz.pub │ └── sshmulev.pub ├── terraform ├── update-base.py └── update_github_status.sh ├── scripts └── aws │ └── rhel-ha-aws-check.sh ├── test ├── __init__.py ├── test_cloud_image_validator.py ├── test_opentofu_controller.py ├── test_reporter.py └── test_suite_runner.py └── test_suite ├── cloud ├── test_aws.py └── test_azure.py ├── conftest.py ├── generic ├── helpers.py ├── test_generic.py ├── test_markers.py └── test_reboot.py ├── package ├── otel_package │ ├── __init__.py │ ├── fixtures.py │ └── test_otel.py ├── test_awscli2.py └── test_efs_utils.py ├── rhel_devel ├── ctc │ └── test_ctc.py ├── cut │ └── test_cut.py └── run_cloudx_components_testing.py └── suite_runner.py /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Basic Tests 2 | on: 3 | push: 4 | branches: 5 | - main 6 | pull_request: 7 | branches: 8 | - main 9 | 10 | jobs: 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | 16 | - name: Install Python 3 17 | uses: actions/setup-python@v1 18 | with: 19 | python-version: 3.11 20 | 21 | - name: Install dependencies 22 | run: | 23 | python -m pip install --upgrade pip 24 | pip install -r requirements.txt 25 | 26 | - name: Run unit tests 27 | run: pytest test/ 28 | 29 | - name: Get code coverage 30 | run: pytest --cov-report term-missing --cov=cloud/terraform --cov=result/ --cov=main/ --cov=test_suite/ test/ 31 | 32 | - name: Python linter 33 | run: flake8 --ignore=E501,W503 34 | -------------------------------------------------------------------------------- /.github/workflows/trigger-gitlab.yml: -------------------------------------------------------------------------------- 1 | # inspired by rhinstaller/anaconda 2 | 3 | name: Trigger GitLab CI 4 | 5 | on: 6 | 
workflow_run: 7 | workflows: ["Basic Tests"] 8 | types: [completed] 9 | 10 | jobs: 11 | trigger-gitlab: 12 | if: ${{ github.event.workflow_run.conclusion == 'success' }} 13 | runs-on: ubuntu-latest 14 | env: 15 | IMAGEBUILDER_BOT_GITLAB_SSH_KEY: ${{ secrets.IMAGEBUILDER_BOT_GITLAB_SSH_KEY }} 16 | steps: 17 | - name: Report status 18 | uses: haya14busa/action-workflow_run-status@v1 19 | 20 | - name: Install Dependencies 21 | run: | 22 | sudo apt install -y jq 23 | - name: Clone repository 24 | uses: actions/checkout@v3 25 | with: 26 | ref: ${{ github.event.workflow_run.head_sha }} 27 | fetch-depth: 0 28 | 29 | - uses: octokit/request-action@v2.x 30 | id: fetch_pulls 31 | env: 32 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 33 | with: 34 | route: GET /repos/${{ github.repository }}/pulls 35 | per_page: 100 36 | 37 | - name: Checkout branch 38 | env: 39 | BRANCH: ${{ github.event.workflow_run.head_branch }} 40 | run: | 41 | PR_DATA=$(mktemp) 42 | # use uuid as a file terminator to avoid conflicts with data content 43 | cat > "$PR_DATA" <<'a21b3e7f-d5eb-44a3-8be0-c2412851d2e6' 44 | ${{ steps.fetch_pulls.outputs.data }} 45 | a21b3e7f-d5eb-44a3-8be0-c2412851d2e6 46 | PR=$(jq -rc '.[] | select(.head.sha | contains("${{ github.event.workflow_run.head_sha }}")) | select(.state | contains("open"))' "$PR_DATA" | jq -r .number) 47 | if [ ! 
-z "$PR" ]; then 48 | git checkout -b PR-$PR 49 | else 50 | git checkout "${BRANCH}" -- 51 | fi 52 | 53 | - name: Push to GitLab 54 | run: | 55 | mkdir -p ~/.ssh 56 | echo "${IMAGEBUILDER_BOT_GITLAB_SSH_KEY}" > ~/.ssh/id_rsa 57 | chmod 400 ~/.ssh/id_rsa 58 | touch ~/.ssh/known_hosts 59 | ssh-keyscan -t rsa gitlab.com >> ~/.ssh/known_hosts 60 | git remote add ci git@gitlab.com:redhat/services/products/image-builder/ci/cloud-image-val-ci.git 61 | git push -f ci 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | */.pytest_cache/ 2 | */__pycache__/ 3 | */*/__pycache__/ 4 | .idea 5 | .terraform* 6 | *.tfstate* 7 | .vscode/ 8 | .coverage 9 | *.tf.json 10 | .venv 11 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | stages: 2 | - init 3 | - build 4 | - test 5 | - finish 6 | 7 | .deps: 8 | before_script: 9 | - bash schutzbot/append_team_ssh_keys.sh 10 | - curl -fsSL https://get.docker.com -o get-docker.sh 11 | - sudo sh get-docker.sh 12 | - sudo systemctl start docker 13 | - sudo docker login "${QUAY_IO_CONTAINER_URL}" -u ${QUAY_USERNAME} -p ${QUAY_PASSWORD} 14 | variables: 15 | RUNNER: aws/fedora-40-x86_64 16 | INTERNAL_NETWORK: "true" 17 | QUAY_IO_CONTAINER_URL: quay.io/cloudexperience/cloud-image-val 18 | tags: 19 | - terraform 20 | 21 | .tests: 22 | extends: .deps 23 | after_script: 24 | - schutzbot/update_github_status.sh update || true 25 | - echo https://redhat.gitlab.io/-/services/products/image-builder/ci/cloud-image-val-ci/-/jobs/${CI_JOB_ID}/artifacts/report.html 26 | retry: 1 27 | artifacts: 28 | paths: 29 | - report.html 30 | when: always 31 | 32 | init: 33 | stage: init 34 | script: 35 | - schutzbot/update_github_status.sh start 36 | tags: 37 | - shell 38 | 39 | build-branch-container: 40 | 
extends: .deps 41 | stage: build 42 | script: 43 | - sudo docker run --privileged --rm tonistiigi/binfmt --install all 44 | - sudo docker buildx create --use 45 | - export build_cmd="sudo docker buildx build --push --platform linux/arm64,linux/amd64" 46 | - if python schutzbot/update-base.py | grep -q "true" ; then ${build_cmd} -t "${QUAY_IO_CONTAINER_URL}-base":latest -f base.Dockerfile .; fi 47 | - ${build_cmd} -t "${QUAY_IO_CONTAINER_URL}":"${CI_COMMIT_REF_SLUG}" -f Dockerfile . 48 | 49 | prepare-rhel-internal-runners: 50 | stage: build 51 | before_script: 52 | - bash schutzbot/append_team_ssh_keys.sh 53 | rules: 54 | - if: $NIGHTLY == "true" 55 | script: 56 | - schutzbot/prepare-rhel-internal.sh 57 | artifacts: 58 | paths: 59 | - rhel-${RHEL_MAJOR}.json 60 | - rhel${RHEL_MAJOR}internal.repo 61 | - COMPOSE_ID 62 | tags: 63 | - terraform 64 | parallel: 65 | matrix: 66 | - RUNNER: 67 | # NOTE: 1 runner prepares for all arches b/c subsequent jobs download 68 | # artifacts from all previous jobs and the last one wins 69 | - aws/rhel-10.0-nightly-x86_64 70 | INTERNAL_NIGHTLY: [ "internal" ] 71 | INTERNAL_NETWORK: [ "true" ] 72 | NIGHTLY: [ "true" ] 73 | 74 | .aws: 75 | stage: test 76 | extends: .tests 77 | rules: 78 | - if: $CI_COMMIT_REF_SLUG != "main" 79 | script: 80 | - sudo docker pull "${QUAY_IO_CONTAINER_URL}":"${CI_COMMIT_REF_SLUG}" 81 | - | 82 | sudo docker run \ 83 | -a stdout -a stderr \ 84 | -e AWS_ACCESS_KEY_ID="${CLOUDX_AWS_ACCESS_KEY_ID}" \ 85 | -e AWS_SECRET_ACCESS_KEY="${CLOUDX_AWS_SECRET_ACCESS_KEY}" \ 86 | -e AWS_REGION="${AWS_REGION}" \ 87 | -v ${PWD}:/tmp:Z \ 88 | "${QUAY_IO_CONTAINER_URL}":"${CI_COMMIT_REF_SLUG}" \ 89 | python cloud-image-val.py -r cloud/sample/resources_aws_marketplace.json -d -p -o /tmp/report.xml 90 | 91 | .azure: 92 | stage: test 93 | extends: .tests 94 | rules: 95 | - if: $CI_COMMIT_REF_SLUG != "main" 96 | script: 97 | - sudo docker pull "${QUAY_IO_CONTAINER_URL}":"${CI_COMMIT_REF_SLUG}" 98 | - | 99 | sudo docker run \ 100 | 
-a stdout -a stderr \ 101 | -e ARM_CLIENT_ID="${CLOUDX_AZURE_CLIENT_ID}" \ 102 | -e ARM_CLIENT_SECRET="${CLOUDX_AZURE_CLIENT_SECRET}" \ 103 | -e ARM_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID_MARKETPLACE}" \ 104 | -e ARM_TENANT_ID="${AZURE_TENANT_ID_MARKETPLACE}" \ 105 | -v ${PWD}:/tmp:Z \ 106 | "${QUAY_IO_CONTAINER_URL}":"${CI_COMMIT_REF_SLUG}" \ 107 | python cloud-image-val.py -r cloud/sample/resources_azure_marketplace.json -d -p -o /tmp/report.xml 108 | 109 | .rhel_runners: &rhel_runners 110 | RUNNER: 111 | - aws/rhel-8.10-ga-x86_64 112 | - aws/rhel-9.4-ga-x86_64 113 | # - aws/rhel-9.5-ga-x86_64 114 | - aws/centos-stream-9-x86_64 115 | - aws/centos-stream-10-x86_64 116 | NIGHTLY: [ "false" ] 117 | 118 | .rhel_runners_aarch64: &rhel_runners_aarch64 119 | RUNNER: 120 | - aws/rhel-8.10-ga-aarch64 121 | - aws/rhel-9.4-ga-aarch64 122 | # - aws/rhel-9.5-ga-aarch64 123 | - aws/centos-stream-9-aarch64 124 | - aws/centos-stream-10-aarch64 125 | INTERNAL_NETWORK: [ "true" ] 126 | NIGHTLY: [ "false" ] 127 | 128 | .rhel_runners_x86_64_internal_nightlies: &rhel_runners_x86_64_internal_nightlies 129 | RUNNER: 130 | - aws/rhel-9.6-nightly-x86_64 131 | - aws/rhel-10.0-nightly-x86_64 132 | INTERNAL_NIGHTLY: [ "internal" ] 133 | INTERNAL_NETWORK: [ "true" ] 134 | NIGHTLY: [ "true" ] 135 | 136 | .rhel_runners_aarch64_internal_nightlies: &rhel_runners_aarch64_internal_nightlies 137 | RUNNER: 138 | - aws/rhel-9.6-nightly-aarch64 139 | - aws/rhel-10.0-nightly-aarch64 140 | INTERNAL_NIGHTLY: [ "internal" ] 141 | INTERNAL_NETWORK: [ "true" ] 142 | NIGHTLY: [ "true" ] 143 | 144 | .fedora_runners: &fedora_runners 145 | RUNNER: 146 | - aws/fedora-41-x86_64 147 | NIGHTLY: [ "false" ] 148 | 149 | .image_builder_tests: 150 | extends: .tests 151 | variables: 152 | ARTIFACTS: "/tmp/" 153 | before_script: 154 | - bash schutzbot/append_team_ssh_keys.sh 155 | after_script: 156 | - !reference [ .tests, after_script ] 157 | - cp /tmp/report.html ./report.html 158 | 159 | image-builder-aws: 160 | 
stage: test 161 | extends: .image_builder_tests 162 | script: 163 | - schutzbot/deploy.sh 164 | - python3 schutzbot/get_civ_config.py /tmp/civ_vars.sh && source /tmp/civ_vars.sh 165 | - if [ $SKIP_AWS == "false" ]; then bash /usr/libexec/tests/osbuild-composer/aws.sh; fi 166 | rules: 167 | - if: $NIGHTLY == "false" && $PACKAGES_TESTING != "true" 168 | parallel: 169 | matrix: 170 | - *fedora_runners 171 | - *rhel_runners 172 | - *rhel_runners_aarch64 173 | 174 | image-builder-aws-nightly-compose: 175 | stage: test 176 | extends: .image_builder_tests 177 | script: 178 | - schutzbot/deploy.sh 179 | - python3 schutzbot/get_civ_config.py /tmp/civ_vars.sh && source /tmp/civ_vars.sh 180 | - if [ $SKIP_AWS == "false" ]; then bash /usr/libexec/tests/osbuild-composer/aws.sh; fi 181 | rules: 182 | - if: $NIGHTLY == "true" && $PACKAGES_TESTING != "true" 183 | parallel: 184 | matrix: 185 | - *rhel_runners_x86_64_internal_nightlies 186 | - *rhel_runners_aarch64_internal_nightlies 187 | 188 | image-builder-azure: 189 | stage: test 190 | extends: .image_builder_tests 191 | script: 192 | - schutzbot/deploy.sh 193 | - python3 schutzbot/get_civ_config.py /tmp/civ_vars.sh && source /tmp/civ_vars.sh 194 | - if [ $SKIP_AZURE == "false" ]; then bash /usr/libexec/tests/osbuild-composer/azure.sh; fi 195 | rules: 196 | - if: $NIGHTLY == "false" && $PACKAGES_TESTING != "true" 197 | parallel: 198 | matrix: 199 | - *fedora_runners 200 | - *rhel_runners 201 | 202 | image-builder-azure-nightly-compose: 203 | stage: test 204 | extends: .image_builder_tests 205 | script: 206 | - schutzbot/deploy.sh 207 | - python3 schutzbot/get_civ_config.py /tmp/civ_vars.sh && source /tmp/civ_vars.sh 208 | - if [ $SKIP_AZURE == "false" ]; then bash /usr/libexec/tests/osbuild-composer/azure.sh; fi 209 | rules: 210 | - if: $NIGHTLY == "true" && $PACKAGES_TESTING != "true" 211 | parallel: 212 | matrix: 213 | - *rhel_runners_x86_64_internal_nightlies 214 | 215 | .cloudx_packages_base: 216 | extends: .tests 217 | 
before_script: 218 | - schutzbot/deploy.sh 219 | - python3 schutzbot/get_civ_config.py /tmp/civ_vars.sh && source /tmp/civ_vars.sh 220 | - echo "Running packages testing for ${RUNNER}..." 221 | - echo "Custom packages to be bundled with generated cloud image (${CUSTOM_PACKAGES})" 222 | variables: 223 | ARTIFACTS: "/tmp/" 224 | after_script: 225 | - !reference [ .tests, after_script ] 226 | - cp /tmp/report.html ./report.html 227 | 228 | cloudx-packages-testing-aws: 229 | extends: .cloudx_packages_base 230 | stage: test 231 | variables: 232 | RUNNER: $RUN_ON 233 | rules: 234 | - if: $PACKAGES_TESTING == "true" && $PACKAGES_TESTING_AWS == "true" 235 | script: 236 | - bash ci/aws.sh 237 | 238 | promote-container-to-main: 239 | extends: .deps 240 | stage: finish 241 | rules: 242 | - if: $CI_COMMIT_REF_SLUG == "main" 243 | script: 244 | - sudo dnf install skopeo -y 245 | - skopeo login quay.io/cloudexperience --password ${QUAY_PASSWORD} --username ${QUAY_USERNAME} 246 | - skopeo copy --all docker://"${QUAY_IO_CONTAINER_URL}":main docker://"${QUAY_IO_CONTAINER_URL}":latest 247 | - skopeo copy --all docker://"${QUAY_IO_CONTAINER_URL}":main docker://"${QUAY_IO_CONTAINER_URL}":prod 248 | 249 | finish: 250 | stage: finish 251 | script: 252 | - schutzbot/update_github_status.sh finish 253 | tags: 254 | - shell 255 | -------------------------------------------------------------------------------- /.gitlab-ci.yml.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/osbuild/cloud-image-val/7682fc950842c962d066a637be9ac6ee4b83d9f2/.gitlab-ci.yml.swp -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/cloudexperience/cloud-image-val-base:latest 2 | 3 | # Copy cloud-image-val project 4 | COPY . . 
5 | 6 | CMD ["bash"] 7 | -------------------------------------------------------------------------------- /base.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi8/python-39:latest 2 | 3 | USER 0 4 | 5 | # Copy cloud-image-val project 6 | COPY ./requirements.txt ./ 7 | 8 | # We need epel for keychain package 9 | RUN dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm 10 | 11 | # Install basic tools 12 | RUN dnf install -y \ 13 | wget \ 14 | unzip \ 15 | keychain 16 | 17 | # Install AWS cli tool V2 (supports only x86_64 and aarch64 for now) 18 | RUN ARCH="$(uname -m | grep -qE '^(amd64|arm64|aarch64)$' && echo "aarch64" || echo "x86_64")"; \ 19 | wget https://awscli.amazonaws.com/awscli-exe-linux-${ARCH}.zip \ 20 | -O awscliv2.zip; \ 21 | unzip awscliv2.zip; \ 22 | ./aws/install 23 | 24 | # Install Azure cli tool 25 | RUN rpm --import https://packages.microsoft.com/keys/microsoft.asc; \ 26 | dnf install -y https://packages.microsoft.com/config/rhel/8/packages-microsoft-prod.rpm; \ 27 | dnf install -y azure-cli 28 | 29 | # Install OpenTofu v1.6.2 which is fully compatible with Terraform v1.5.x 30 | RUN export OPENTOFU_VERSION='1.9.0' 31 | 32 | RUN wget --secure-protocol=TLSv1_2 \ 33 | --https-only https://get.opentofu.org/install-opentofu.sh \ 34 | -O install-opentofu.sh 35 | 36 | RUN chmod +x install-opentofu.sh; \ 37 | ./install-opentofu.sh --install-method rpm; \ 38 | rm install-opentofu.sh 39 | 40 | # Install python requirements 41 | RUN pip install -r requirements.txt 42 | 43 | CMD ["bash"] -------------------------------------------------------------------------------- /ci/.gitlab-ci-cloud-experience.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/osbuild/cloud-image-val/7682fc950842c962d066a637be9ac6ee4b83d9f2/ci/.gitlab-ci-cloud-experience.yaml 
-------------------------------------------------------------------------------- /ci/.gitlab-ci-image-builder.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/osbuild/cloud-image-val/7682fc950842c962d066a637be9ac6ee4b83d9f2/ci/.gitlab-ci-image-builder.yaml -------------------------------------------------------------------------------- /ci/aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Test osbuild-composer 'upload to aws' functionality. To do so, create and 5 | # push a blueprint with composer cli. Then, create an instance in aws 6 | # from the uploaded image. Finally, verify that the instance is running and 7 | # cloud init ran. 8 | # 9 | 10 | set -euo pipefail 11 | 12 | source ci/set-env-variables.sh 13 | source ci/shared_lib.sh 14 | 15 | 16 | # Container image used for cloud provider CLI tools 17 | CONTAINER_IMAGE_CLOUD_TOOLS="quay.io/osbuild/cloud-tools:latest" 18 | 19 | # Provision the software under test. 20 | ci/provision.sh none 21 | 22 | # Check available container runtime 23 | if which podman 2>/dev/null >&2; then 24 | CONTAINER_RUNTIME=podman 25 | elif which docker 2>/dev/null >&2; then 26 | CONTAINER_RUNTIME=docker 27 | else 28 | echo No container runtime found, install podman or docker. 
29 | exit 2 30 | fi 31 | 32 | TEMPDIR=$(mktemp -d) 33 | function cleanup() { 34 | greenprint "== Script execution stopped or finished - Cleaning up ==" 35 | sudo rm -rf "$TEMPDIR" 36 | } 37 | trap cleanup EXIT 38 | 39 | # Generate a string, which can be used as a predictable resource name, 40 | # especially when running the test in CI where we may need to clean up 41 | # resources in case the test unexpectedly fails or is canceled 42 | CI="${CI:-false}" 43 | if [[ "$CI" == true ]]; then 44 | # in CI, imitate GenerateCIArtifactName() from internal/test/helpers.go 45 | TEST_ID="$DISTRO_CODE-$ARCH-$CI_COMMIT_BRANCH-$CI_JOB_ID" 46 | else 47 | # if not running in Jenkins, generate ID not relying on specific env variables 48 | TEST_ID=$(uuidgen); 49 | fi 50 | 51 | ARTIFACTS="${ARTIFACTS:-/tmp/artifacts}" 52 | 53 | # Set up temporary files. 54 | AWS_CONFIG=${TEMPDIR}/aws.toml 55 | BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml 56 | COMPOSE_START=${TEMPDIR}/compose-start-${TEST_ID}.json 57 | COMPOSE_INFO=${TEMPDIR}/compose-info-${TEST_ID}.json 58 | AMI_DATA=${TEMPDIR}/ami-data-${TEST_ID}.json 59 | 60 | # We need awscli to talk to AWS. 61 | if ! hash aws; then 62 | echo "Using 'awscli' from a container" 63 | sudo "${CONTAINER_RUNTIME}" pull ${CONTAINER_IMAGE_CLOUD_TOOLS} 64 | 65 | AWS_CMD="sudo ${CONTAINER_RUNTIME} run --rm \ 66 | -e AWS_ACCESS_KEY_ID=${V2_AWS_ACCESS_KEY_ID} \ 67 | -e AWS_SECRET_ACCESS_KEY=${V2_AWS_SECRET_ACCESS_KEY} \ 68 | -v ${TEMPDIR}:${TEMPDIR}:Z \ 69 | ${CONTAINER_IMAGE_CLOUD_TOOLS} aws --region $AWS_REGION --output json --color on" 70 | else 71 | echo "Using pre-installed 'aws' from the system" 72 | AWS_CMD="aws --region $AWS_REGION --output json --color on" 73 | fi 74 | $AWS_CMD --version 75 | 76 | # Get the compose log. 77 | get_compose_log () { 78 | COMPOSE_ID=$1 79 | LOG_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-aws.log 80 | 81 | # Download the logs. 
82 | sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null 83 | } 84 | 85 | # Get the compose metadata. 86 | get_compose_metadata () { 87 | COMPOSE_ID=$1 88 | METADATA_FILE=${ARTIFACTS}/osbuild-${ID}-${VERSION_ID}-aws.json 89 | 90 | # Download the metadata. 91 | sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null 92 | 93 | # Find the tarball and extract it. 94 | TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")") 95 | sudo tar -xf "$TARBALL" 96 | sudo rm -f "$TARBALL" 97 | 98 | # Move the JSON file into place. 99 | sudo cat "${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null 100 | } 101 | 102 | # Write an AWS TOML file 103 | tee "$AWS_CONFIG" > /dev/null << EOF 104 | provider = "aws" 105 | 106 | [settings] 107 | accessKeyID = "${V2_AWS_ACCESS_KEY_ID}" 108 | secretAccessKey = "${V2_AWS_SECRET_ACCESS_KEY}" 109 | bucket = "${AWS_BUCKET}" 110 | region = "${AWS_REGION}" 111 | key = "${TEST_ID}" 112 | EOF 113 | 114 | # Write a basic blueprint for our image. 115 | tee "$BLUEPRINT_FILE" > /dev/null << EOF 116 | name = "bash" 117 | description = "A base system" 118 | version = "0.0.1" 119 | EOF 120 | 121 | # Append any packages that we want to install 122 | if ! [ -z "$CUSTOM_PACKAGES" ]; then 123 | # shellcheck disable=SC2068 124 | for pkg in ${CUSTOM_PACKAGES[@]}; do 125 | pkg_name="${pkg%:*}" 126 | pkg_version="${pkg##*:}" 127 | 128 | if [[ "$pkg_version" == "$pkg_name" ]]; then 129 | pkg_version='*' 130 | fi 131 | 132 | echo "[[packages]] 133 | name = \"$pkg_name\" 134 | version = \"$pkg_version\" 135 | 136 | " >> "$BLUEPRINT_FILE" 137 | done 138 | fi 139 | 140 | # Prepare the blueprint for the compose. 141 | greenprint "📋 Preparing blueprint" 142 | sudo composer-cli blueprints push "$BLUEPRINT_FILE" 143 | sudo composer-cli blueprints depsolve bash 144 | 145 | # Get worker unit file so we can watch the journal. 
146 | WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service") 147 | sudo journalctl -af -n 1 -u "${WORKER_UNIT}" & 148 | WORKER_JOURNAL_PID=$! 149 | # Stop watching the worker journal when exiting. 150 | trap 'sudo pkill -P ${WORKER_JOURNAL_PID}' EXIT 151 | 152 | # Start the compose and upload to AWS. 153 | greenprint "🚀 Starting compose" 154 | sudo composer-cli --json compose start bash ami "$TEST_ID" "$AWS_CONFIG" | tee "$COMPOSE_START" 155 | COMPOSE_ID=$(get_build_info ".build_id" "$COMPOSE_START") 156 | 157 | # Wait for the compose to finish. 158 | greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}" 159 | while true; do 160 | sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null 161 | COMPOSE_STATUS=$(get_build_info ".queue_status" "$COMPOSE_INFO") 162 | 163 | # Is the compose finished? 164 | if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then 165 | break 166 | fi 167 | 168 | # Wait 30 seconds and try again. 169 | sleep 30 170 | done 171 | 172 | # Capture the compose logs from osbuild. 173 | greenprint "💬 Getting compose log and metadata" 174 | get_compose_log "$COMPOSE_ID" 175 | get_compose_metadata "$COMPOSE_ID" 176 | 177 | # Kill the journal monitor immediately and remove the trap 178 | sudo pkill -P ${WORKER_JOURNAL_PID} 179 | trap - EXIT 180 | 181 | # Did the compose finish with success? 182 | if [[ $COMPOSE_STATUS != FINISHED ]]; then 183 | redprint "Something went wrong with the compose. 😢" 184 | exit 1 185 | fi 186 | 187 | # Find the image that we made in AWS. 
188 | greenprint "🔍 Search for created AMI" 189 | $AWS_CMD ec2 describe-images \ 190 | --owners self \ 191 | --filters Name=name,Values="${TEST_ID}" \ 192 | | tee "$AMI_DATA" > /dev/null 193 | 194 | AMI_IMAGE_ID=$(jq -r '.Images[].ImageId' "$AMI_DATA") 195 | SNAPSHOT_ID=$(jq -r '.Images[].BlockDeviceMappings[].Ebs.SnapshotId' "$AMI_DATA") 196 | 197 | # Share the created AMI with the CloudX account 198 | $AWS_CMD ec2 modify-image-attribute \ 199 | --image-id "${AMI_IMAGE_ID}" \ 200 | --launch-permission "Add=[{UserId=${CLOUDX_AWS_ACCOUNT_ID}}]" 201 | 202 | # Tag image and snapshot with "gitlab-ci-test" tag 203 | $AWS_CMD ec2 create-tags \ 204 | --resources "${SNAPSHOT_ID}" "${AMI_IMAGE_ID}" \ 205 | --tags Key=gitlab-ci-test,Value=true 206 | 207 | # Verify that the image has the correct boot mode set 208 | AMI_BOOT_MODE=$(jq -r '.Images[].BootMode // empty' "$AMI_DATA") 209 | if nvrGreaterOrEqual "osbuild-composer" "83"; then 210 | case "$ARCH" in 211 | aarch64) 212 | # aarch64 image supports only uefi boot mode 213 | if [[ "$AMI_BOOT_MODE" != "uefi" ]]; then 214 | redprint "AMI boot mode is not \"uefi\", but \"$AMI_BOOT_MODE\"" 215 | exit 1 216 | fi 217 | ;; 218 | x86_64) 219 | # x86_64 image supports hybrid boot mode with preference for uefi 220 | if [[ "$AMI_BOOT_MODE" != "uefi-preferred" ]]; then 221 | redprint "AMI boot mode is not \"uefi-preferred\", but \"$AMI_BOOT_MODE\"" 222 | exit 1 223 | fi 224 | ;; 225 | *) 226 | redprint "Unsupported architecture: $ARCH" 227 | exit 1 228 | ;; 229 | esac 230 | fi 231 | 232 | if [[ "$ID" == "fedora" ]]; then 233 | # fedora uses fedora 234 | SSH_USER="fedora" 235 | else 236 | # RHEL and centos use ec2-user 237 | SSH_USER="ec2-user" 238 | fi 239 | 240 | greenprint "Pulling cloud-image-val container" 241 | 242 | if [[ "$CI_PROJECT_NAME" =~ "cloud-image-val" ]]; then 243 | # If running on CIV, get dev container 244 | TAG=${CI_COMMIT_REF_SLUG} 245 | else 246 | # If not, get prod container 247 | TAG="prod" 248 | fi 249 | 250 | 
CONTAINER_CLOUD_IMAGE_VAL="quay.io/cloudexperience/cloud-image-val:$TAG" 251 | 252 | sudo "${CONTAINER_RUNTIME}" pull "${CONTAINER_CLOUD_IMAGE_VAL}" 253 | 254 | greenprint "Running cloud-image-val on generated image" 255 | 256 | # Default instance type for x86_64 257 | instance_type="t3.medium" 258 | if [ "$ARCH" == "aarch64" ]; then 259 | instance_type="m6g.large" 260 | fi 261 | 262 | tee "${TEMPDIR}/resource-file.json" < /dev/null 311 | 312 | # Use the return code of the smoke test to determine if we passed or failed. 313 | # On rhel continue with the cloudapi test 314 | case $CIV_EXIT_CODE in 315 | 0) 316 | greenprint "💚 Success" 317 | exit 0 318 | ;; 319 | 5) 320 | echo "❗ No tests were run" 321 | exit 0 322 | ;; 323 | 100) 324 | redprint "❌ Failed (cloud deployment/destroy issues)" 325 | exit 1 326 | ;; 327 | *) 328 | redprint "❌ Failed (exit code: ${CIV_EXIT_CODE})" 329 | exit 1 330 | ;; 331 | esac 332 | 333 | exit 0 334 | -------------------------------------------------------------------------------- /ci/provision.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | source ci/set-env-variables.sh 5 | source ci/shared_lib.sh 6 | 7 | # create artifacts folder 8 | ARTIFACTS="${ARTIFACTS:=/tmp/artifacts}" 9 | mkdir -p "${ARTIFACTS}" 10 | 11 | # determine the authentication method used by composer 12 | AUTH_METHOD_TLS="tls" 13 | AUTH_METHOD_JWT="jwt" 14 | AUTH_METHOD_NONE="none" 15 | # default to TLS for now 16 | AUTH_METHOD="${1:-$AUTH_METHOD_TLS}" 17 | 18 | COMPOSER_CONFIG="/etc/osbuild-composer/osbuild-composer.toml" 19 | 20 | # Path to a file with additional configuration for composer. 21 | # The content of this file will be appended to the default configuration. 22 | EXTRA_COMPOSER_CONFIG="${2:-}" 23 | 24 | if [[ -n "${EXTRA_COMPOSER_CONFIG}" && ! 
-f "${EXTRA_COMPOSER_CONFIG}" ]]; then 25 | echo "ERROR: File '${EXTRA_COMPOSER_CONFIG}' with extra configuration for composer does not exist." 26 | exit 1 27 | fi 28 | 29 | # koji and ansible are not in RHEL repositories. Depending on them in the spec 30 | # file breaks RHEL gating (see OSCI-1541). Therefore, we need to enable epel 31 | # and install koji and ansible here. 32 | if [[ $ID == rhel || $ID == centos ]] && ! rpm -q epel-release; then 33 | curl -Ls --retry 5 --output /tmp/epel.rpm \ 34 | https://dl.fedoraproject.org/pub/epel/epel-release-latest-"${VERSION_ID%.*}".noarch.rpm 35 | sudo rpm -Uvh /tmp/epel.rpm 36 | fi 37 | 38 | # RHEL 8.6+ and CentOS 9 require different handling for ansible 39 | if [[ "$VERSION_ID" == "8.4" ]]; then 40 | sudo dnf install -y ansible koji 41 | else 42 | sudo dnf install -y ansible-core koji 43 | fi 44 | 45 | sudo mkdir -p /etc/osbuild-composer 46 | sudo mkdir -p /etc/osbuild-worker 47 | 48 | # osbuild-composer and worker need to be configured in a specific way only when using 49 | # some authentication method (Service scenario). In such case, also credentials for 50 | # interacting with cloud providers are configured directly in the worker. In addition, 51 | # no certificates need to be generated, because they are not used anywhere in this 52 | # scenario. 53 | if [[ "$AUTH_METHOD" != "$AUTH_METHOD_NONE" ]]; then 54 | # Generate all X.509 certificates for the tests 55 | # The whole generation is done in a $CADIR to better represent how osbuild-ca 56 | # it. 
57 | CERTDIR=/etc/osbuild-composer 58 | OPENSSL_CONFIG=/usr/share/tests/osbuild-composer/x509/openssl.cnf 59 | CADIR=/etc/osbuild-composer-test/ca 60 | 61 | scriptloc=$(dirname "$0") 62 | sudo "${scriptloc}/gen-certs.sh" "${OPENSSL_CONFIG}" "${CERTDIR}" "${CADIR}" 63 | sudo chown _osbuild-composer "${CERTDIR}"/composer-*.pem 64 | 65 | # Copy the appropriate configuration files 66 | if [[ "$AUTH_METHOD" == "$AUTH_METHOD_JWT" ]]; then 67 | COMPOSER_TEST_CONFIG="/usr/share/tests/osbuild-composer/composer/osbuild-composer-jwt.toml" 68 | WORKER_TEST_CONFIG="/usr/share/tests/osbuild-composer/worker/osbuild-worker-jwt.toml" 69 | 70 | # Default orgID 71 | sudo tee "/etc/osbuild-worker/token" >/dev/null < /dev/null << EOF 93 | 94 | [gcp] 95 | credentials = "$GCP_CREDS_WORKER_PATH" 96 | bucket = "$GCP_BUCKET" 97 | EOF 98 | fi 99 | 100 | # if Azure credentials are defined in the env, create the credentials file 101 | V2_AZURE_CLIENT_ID="${V2_AZURE_CLIENT_ID:-}" 102 | V2_AZURE_CLIENT_SECRET="${V2_AZURE_CLIENT_SECRET:-}" 103 | if [[ -n "$V2_AZURE_CLIENT_ID" && -n "$V2_AZURE_CLIENT_SECRET" ]]; then 104 | set +x 105 | sudo tee /etc/osbuild-worker/azure-credentials.toml > /dev/null << EOF 106 | client_id = "$V2_AZURE_CLIENT_ID" 107 | client_secret = "$V2_AZURE_CLIENT_SECRET" 108 | EOF 109 | sudo tee -a /etc/osbuild-worker/osbuild-worker.toml > /dev/null << EOF 110 | 111 | [azure] 112 | credentials = "/etc/osbuild-worker/azure-credentials.toml" 113 | EOF 114 | set -x 115 | fi 116 | 117 | # if AWS credentials are defined in the ENV, add them to the worker's configuration 118 | V2_AWS_ACCESS_KEY_ID="${V2_AWS_ACCESS_KEY_ID:-}" 119 | V2_AWS_SECRET_ACCESS_KEY="${V2_AWS_SECRET_ACCESS_KEY:-}" 120 | if [[ -n "$V2_AWS_ACCESS_KEY_ID" && -n "$V2_AWS_SECRET_ACCESS_KEY" ]]; then 121 | set +x 122 | sudo tee /etc/osbuild-worker/aws-credentials.toml > /dev/null << EOF 123 | [default] 124 | aws_access_key_id = "$V2_AWS_ACCESS_KEY_ID" 125 | aws_secret_access_key = "$V2_AWS_SECRET_ACCESS_KEY" 126 | 
EOF 127 | sudo tee -a /etc/osbuild-worker/osbuild-worker.toml > /dev/null << EOF 128 | 129 | [aws] 130 | credentials = "/etc/osbuild-worker/aws-credentials.toml" 131 | bucket = "${AWS_BUCKET}" 132 | EOF 133 | set -x 134 | fi 135 | 136 | # if OCI credentials are defined in the ENV, add them to the worker's configuration 137 | OCI_SECRETS="${OCI_SECRETS:-}" 138 | OCI_PRIVATE_KEY="${OCI_PRIVATE_KEY:-}" 139 | if [[ -n "$OCI_SECRETS" && -n "$OCI_PRIVATE_KEY" ]]; then 140 | set +x 141 | OCI_USER=$(jq -r '.user' "$OCI_SECRETS") 142 | OCI_TENANCY=$(jq -r '.tenancy' "$OCI_SECRETS") 143 | OCI_REGION=$(jq -r '.region' "$OCI_SECRETS") 144 | OCI_FINGERPRINT=$(jq -r '.fingerprint' "$OCI_SECRETS") 145 | OCI_BUCKET_NAME=$(jq -r '.bucket' "$OCI_SECRETS") 146 | OCI_NAMESPACE=$(jq -r '.namespace' "$OCI_SECRETS") 147 | OCI_COMPARTMENT=$(jq -r '.compartment' "$OCI_SECRETS") 148 | OCI_PRIV_KEY=$(cat "$OCI_PRIVATE_KEY") 149 | 150 | sudo tee /etc/osbuild-worker/oci-credentials.toml > /dev/null << EOF 151 | user = "$OCI_USER" 152 | tenancy = "$OCI_TENANCY" 153 | region = "$OCI_REGION" 154 | fingerprint = "$OCI_FINGERPRINT" 155 | namespace = "$OCI_NAMESPACE" 156 | bucket = "$OCI_BUCKET_NAME" 157 | private_key = """ 158 | $OCI_PRIV_KEY 159 | """ 160 | compartment = "$OCI_COMPARTMENT" 161 | EOF 162 | sudo tee -a /etc/osbuild-worker/osbuild-worker.toml > /dev/null << EOF 163 | [oci] 164 | credentials = "/etc/osbuild-worker/oci-credentials.toml" 165 | EOF 166 | set -x 167 | fi 168 | 169 | else # AUTH_METHOD_NONE 170 | # Repositories in /etc/osbuild-composer/repositories are used only in the 171 | # on-premise scenario (Weldr). 
172 | # Copy rpmrepo snapshots for use in weldr tests 173 | REPODIR=/etc/osbuild-composer/repositories 174 | sudo mkdir -p $REPODIR 175 | # Copy all fedora repo overrides 176 | sudo cp -a /usr/share/tests/osbuild-composer/repositories/{fedora,centos}-*.json "$REPODIR" 177 | # Copy RHEL point release repos 178 | sudo cp -a /usr/share/tests/osbuild-composer/repositories/rhel-*.json "$REPODIR" 179 | 180 | # override source repositories to consume content from the nightly compose 181 | if [ "${NIGHTLY:=false}" == "true" ]; then 182 | source /usr/libexec/osbuild-composer-test/define-compose-url.sh 183 | 184 | # TODO: remove once the osbuild-composer v100 is in RHEL 185 | if ! nvrGreaterOrEqual "osbuild-composer" "100"; then 186 | VERSION_SUFFIX=$(echo "${VERSION_ID}" | tr -d ".") 187 | # remove dots from the repo overrides filename, because the installed version of composer can't handle it 188 | for REPO_FILE in "${REPODIR}"/*.json; do 189 | REPO_FILE_NO_DOTS="$(basename "${REPO_FILE}" ".json" | tr -d ".").json" 190 | if [[ "${REPO_FILE}" != "${REPODIR}/${REPO_FILE_NO_DOTS}" ]]; then 191 | sudo mv "${REPO_FILE}" "${REPODIR}/${REPO_FILE_NO_DOTS}" 192 | fi 193 | done 194 | else 195 | VERSION_SUFFIX=${VERSION_ID} 196 | fi 197 | 198 | for ARCH in aarch64 ppc64le s390x x86_64; do 199 | for REPO_NAME in BaseOS AppStream RT; do 200 | REPO_NAME_LOWERCASE=$(echo "$REPO_NAME" | tr "[:upper:]" "[:lower:]") 201 | # will replace only the lines which match 202 | sudo sed -i "s|https://rpmrepo.osbuild.org/v2/mirror/rhvpn/el.*${ARCH}-${REPO_NAME_LOWERCASE}-.*|${COMPOSE_URL}/compose/${REPO_NAME}/${ARCH}/os/\",|" "${REPODIR}/rhel-${VERSION_SUFFIX}.json" 203 | done 204 | done 205 | fi 206 | fi 207 | 208 | # Append the extra configuration to the default configuration 209 | if [[ -n "${EXTRA_COMPOSER_CONFIG}" ]]; then 210 | echo "INFO: Appending extra composer configuration from '${EXTRA_COMPOSER_CONFIG}'" 211 | cat "${EXTRA_COMPOSER_CONFIG}" | sudo tee -a "${COMPOSER_CONFIG}" 212 | fi 213 
#!/bin/bash
# ARCH and DISTRO_CODE are exported for consumption by scripts that source
# this file, so they look unused here.
# shellcheck disable=SC2034

source /etc/os-release

ARCH="$(uname -m)"
# Allow the caller to pre-set DISTRO_CODE; otherwise derive it from os-release.
DISTRO_CODE="${DISTRO_CODE:-${ID}-${VERSION_ID}}"
# Succeed (return 0) when the installed version of RPM $1 is >= $2.
# NOTE(review): only %{version} is compared — release and epoch are ignored,
# despite the "nvr" in the name.
function nvrGreaterOrEqual() {
    local rpm_name=$1
    local min_version=$2

    # rpmdev-vercmp exits non-zero by design; disable errexit around it.
    set +e

    rpm_version=$(rpm -q --qf "%{version}" "${rpm_name}")
    rpmdev-vercmp "${rpm_version}" "${min_version}" 1>&2
    # Capture the status explicitly instead of testing $? later (SC2181).
    # rpmdev-vercmp exit codes:
    #   0  - rpm_version == min_version
    #   11 - rpm_version >  min_version
    #   12 - rpm_version <  min_version
    local vercmp_status=$?
    if [ "${vercmp_status}" != "12" ]; then
        echo "DEBUG: ${rpm_version} >= ${min_version}" 1>&2
        set -e
        return
    fi

    set -e
    false
}

# Extract a value from a compose build-info JSON file, accounting for the
# different output shapes produced by weldr-client versions.
#   $1 - jq key path (e.g. '.build_id')
#   $2 - path to the JSON file
function get_build_info() {
    local key="$1"
    local fname="$2"
    if rpm -q --quiet weldr-client; then
        key=".body${key}"
        if nvrGreaterOrEqual "weldr-client" "35.6" 2> /dev/null; then
            key=".[0]${key}"
        fi
    fi
    jq -r "${key}" "${fname}"
}

# Colorful timestamped output.
function greenprint() {
    echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}

function redprint() {
    echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m"
}
# CLI options controlling test selection and execution mode.
parser.add_argument('--test-suites',
                    # Fixed help text: added the missing line break between the two
                    # sentences and corrected "will be use for test run".
                    help='Use this option to specify which test suites will be used for the test run.\n'
                         'If no test suite is provided, default test suites will be used (see test_suite/)',
                    nargs='+',
                    default=None)
parser.add_argument('-m', '--include-markers',
                    help='Use this option to specify which tests to run that match a pytest markers expression.\n'
                         'The only marker currently supported is "pub" (see pytest.ini for more details)\n'
                         'Example:\n'
                         '\t-m "pub" --> run tests marked as "pub", which is for images that are already published\n'
                         '\t-m "not pub" --> exclude "pub" tests\n'
                         'More information about pytest markers:\n'
                         '--> https://doc.pytest.org/en/latest/example/markers.html',
                    default=None)
parser.add_argument('-p', '--parallel',
                    help='Use this option to enable parallel test execution mode.',
                    action='store_true',
                    default=None)
parser.add_argument('-d', '--debug',
                    help='Use this option to enable debugging mode.',
                    action='store_true',
                    default=None)
parser.add_argument('-s', '--stop-cleanup',
                    help='Use this option to enable stop cleanup process until a key is pressed. \n'
                         'Helpful when you need to connect through ssh to an instance.',
                    action='store_true',
                    default=None)
parser.add_argument('-e', '--environment',
                    help='Use this option to set what environment CIV is going to run on.\n'
                         'This can change CIV behaviour like how "-s" works. This option can be\n'
                         'set to "automated" or "local".',
                    default=None)
parser.add_argument('-c', '--config-file',
                    help='Use this option to pass CLI options through a config file.\n'
                         'This config should be in yaml format, examples can be found in the README',
                    default=None)
parser.add_argument('--tags',
                    # Fixed help text: "This tags" -> "These tags".
                    help='Use this option to add tags to created cloud resources and modify CIV behaviour.\n'
                         'These tags should be passed in json format as in this example:\n'
                         '--tags \'{"key1": "value1", "key2": "value2"}\'',
                    default=None)

if __name__ == '__main__':
    args = parser.parse_args()

    # Make the repository root importable no matter where CIV is launched from.
    if 'PYTHONPATH' not in os.environ:
        os.environ['PYTHONPATH'] = ''
    os.environ['PYTHONPATH'] = ':'.join(
        [f'{os.path.dirname(__file__)}', os.environ['PYTHONPATH']])

    # Merge CLI args into the (optional) YAML config, validate and export it.
    civ_config = CIVConfig(args.__dict__)
    civ_config.update_config()
    civ_config.validate_config()
    civ_config.export_config_as_env_vars()

    config = civ_config.get_config()

    if config['debug']:
        console_lib.print_divider('Config')
        pprint(config)

    validator = CloudImageValidator(config=config)
    exit(validator.main())
# Methods of AWSConfigBuilderEfs (cloud/opentofu/aws_config_builder_efs.py).

def build_providers(self):
    """Register one aliased AWS provider per unique region in the resources file."""
    provider_list = self.providers_tf['provider'][self.cloud_providers[self.cloud_name]]
    for region in self.__get_all_regions_from_resources_file():
        provider_list.append(self.__new_aws_provider(region))

    return self.providers_tf

def __get_all_regions_from_resources_file(self):
    """Return the instance regions, de-duplicated while preserving order."""
    unique_regions = dict.fromkeys(inst['region'] for inst in self.resources_dict['instances'])
    return list(unique_regions)

def __new_aws_provider(self, region):
    # Aliased by region so multi-region resources can target the right provider.
    return {
        'region': region,
        'alias': region,
        'skip_region_validation': True,
    }

def build_resources(self):
    """Declare key pairs, per-region EFS storage and EC2 instances for each entry."""
    self.resources_tf['resource']['aws_key_pair'] = {}

    # Data sources and resources needed for the per-region EFS share.
    self.resources_tf['data'] = {'aws_vpc': {}, 'aws_subnets': {}}
    self.resources_tf['resource']['aws_efs_file_system'] = {}
    self.resources_tf['resource']['aws_efs_mount_target'] = {}

    self.resources_tf['resource']['aws_instance'] = {}

    for instance in self.resources_dict['instances']:
        self.__new_aws_key_pair(instance)

        self.__get_data_aws_vpc(instance)
        self.__get_data_aws_subnets(instance)
        self.__new_aws_efs_file_system(instance)
        self.__new_aws_efs_mount_target(instance)

        self.__new_aws_instance(instance)

    return self.resources_tf

def __new_aws_key_pair(self, instance):
    """Declare an aws_key_pair in the instance's region and record its name on the instance."""
    region = instance['region']
    key_name = self.create_resource_name([region, 'key'])
    instance['aws_key_pair'] = key_name

    key_pair = {
        'provider': f'aws.{region}',
        'key_name': key_name,
        'public_key': f'${{file("{self.ssh_key_path}")}}',
    }
    self.add_tags(self.config, key_pair)

    self.resources_tf['resource']['aws_key_pair'][key_name] = key_pair
# Methods of AWSConfigBuilderEfs (cloud/opentofu/aws_config_builder_efs.py).

def __get_data_aws_vpc(self, instance):
    """Declare (at most once per region) a data source for the region's default VPC."""
    tf_data_type = 'aws_vpc'
    region = instance['region']
    vpc_name = self.create_resource_name([region, 'default', 'vpc'])

    # Reuse the data source if one was already declared for this region.
    existing_name = self.__get_tf_resource_name_by_region(tf_data_type, region, 'data')
    if existing_name:
        instance[tf_data_type] = existing_name
        return

    instance[tf_data_type] = vpc_name

    self.resources_tf['data'][tf_data_type][vpc_name] = {
        'provider': f'aws.{region}',
        'default': True,
    }

def __get_data_aws_subnets(self, instance):
    """Declare (at most once per region) a data source listing all subnets of the default VPC."""
    tf_data_type = 'aws_subnets'
    region = instance['region']
    all_subnets_name = self.create_resource_name([region, 'all', 'subnets'])

    existing_name = self.__get_tf_resource_name_by_region(tf_data_type, region, 'data')
    if existing_name:
        instance[tf_data_type] = existing_name
        return

    instance[tf_data_type] = all_subnets_name

    vpc_id_ref = 'data.aws_vpc.{}.id'.format(instance['aws_vpc'])

    self.resources_tf['data'][tf_data_type][all_subnets_name] = {
        'provider': f'aws.{region}',
        'filter': {
            'name': 'vpc-id',
            'values': [f'${{{vpc_id_ref}}}'],
        },
    }

def __new_aws_efs_file_system(self, instance):
    """Declare (at most once per region) an encrypted EFS file system."""
    tf_resource_type = 'aws_efs_file_system'
    region = instance['region']
    efs_filesystem_name = self.create_resource_name([region, 'efs', 'filesystem'])

    existing_name = self.__get_tf_resource_name_by_region(tf_resource_type, region)
    if existing_name:
        instance[tf_resource_type] = existing_name
        return

    instance[tf_resource_type] = efs_filesystem_name

    new_efs_filesystem = {
        'provider': f'aws.{region}',
        'creation_token': efs_filesystem_name,
        'encrypted': 'true',
        'tags': {'name': efs_filesystem_name},
    }
    self.add_tags(self.config, new_efs_filesystem)

    self.resources_tf['resource'][tf_resource_type][efs_filesystem_name] = new_efs_filesystem
# Method of AWSConfigBuilderEfs (cloud/opentofu/aws_config_builder_efs.py).

def __new_aws_efs_mount_target(self, instance):
    """Declare (at most once per region) EFS mount targets in every subnet of the default VPC."""
    tf_resource_type = 'aws_efs_mount_target'
    region = instance['region']
    mount_target_name = self.create_resource_name([region, 'efs', 'mount-target'])

    # Reuse the regional mount target if it already exists.
    existing_name = self.__get_tf_resource_name_by_region(tf_resource_type, region)
    if existing_name:
        instance[tf_resource_type] = existing_name
        return

    instance[tf_resource_type] = mount_target_name

    subnet_ids_ref = 'data.aws_subnets.{}.ids'.format(instance['aws_subnets'])
    filesystem_id_ref = 'aws_efs_file_system.{}.id'.format(instance['aws_efs_file_system'])

    # One mount target per subnet, via for_each over the subnet id set.
    self.resources_tf['resource'][tf_resource_type][mount_target_name] = {
        'provider': f'aws.{region}',
        'for_each': f'${{toset({subnet_ids_ref})}}',
        'file_system_id': f'${{{filesystem_id_ref}}}',
        'subnet_id': '${each.value}',
    }
# Methods of AWSConfigBuilderEfs (cloud/opentofu/aws_config_builder_efs.py).

def __new_aws_instance(self, instance):
    """Declare the aws_instance resource for one entry of the resources file."""
    if not instance['instance_type']:
        # CIV will assume the AMI is x64. For ARM, the instance_type must be
        # manually specified in resources.json.
        instance['instance_type'] = 't3.medium'

    name_tag_value = instance['name'].replace('.', '-')
    name = self.create_resource_name([name_tag_value])

    # The instance's region must already have an aliased provider declared.
    known_aliases = [provider['alias'] for provider in self.providers_tf['provider'][self.cloud_name]]
    if instance['region'] not in known_aliases:
        raise Exception('Cannot add an instance if region provider is not set up')

    new_instance = {
        'instance_type': instance['instance_type'],
        'ami': instance['ami'],
        'provider': f'aws.{instance["region"]}',
        'key_name': instance['aws_key_pair'],
        'tags': {'name': name_tag_value},
        'depends_on': [
            'aws_key_pair.{}'.format(instance['aws_key_pair']),
        ],
    }
    self.add_tags(self.config, new_instance)

    self.resources_tf['resource']['aws_instance'][name] = new_instance

def __get_tf_resource_name_by_region(self, resource_type, region, tf_definition_type='resource'):
    """Return the already-declared resource/data name containing `region`, or None."""
    declared = self.resources_tf[tf_definition_type].get(resource_type, {})
    return next((resource_name for resource_name in declared if region in resource_name), None)
# BaseConfigBuilder internals (cloud/opentofu/base_config_builder.py).

def __init__(self, resources_dict, ssh_key_path, config):
    """Store the shared inputs and seed the provider/resource tf documents."""
    self.resources_dict = resources_dict
    self.ssh_key_path = ssh_key_path
    self.config = config

    # Subclasses populate these via build_resources()/build_providers().
    provider_key = self.cloud_providers[self.cloud_name]
    self.resources_tf = {'resource': {}}
    self.providers_tf = {'provider': {provider_key: []}}

def build_resources(self) -> dict:
    """Subclass hook: populate and return self.resources_tf."""
    pass

def build_providers(self) -> dict:
    """Subclass hook: populate and return self.providers_tf."""
    pass
# BaseConfigBuilder internals (cloud/opentofu/base_config_builder.py).

def create_resource_name(self, resource_names_combination, separator='-') -> str:
    """
    Compose a resource name of the form '<prefix><sep><names><sep><random digits>'.

    Example: ['vm', 'eastus'] yields 'civ-vm-eastus-XXXXX' (XXXXX being random
    digits). Any '<prefix><separator>' already present in the given names is
    stripped to avoid redundancy, and the middle part is truncated so the full
    name never exceeds the 63-character Terraform resource-name limit.
    :param resource_names_combination: keywords, names or IDs to include in the name.
    :param separator: string used to join the pieces.
    :return: prefixed, truncated and random-suffixed resource name.
    """
    max_chars = 63
    suffix = self.get_random_numbers()

    # E.g. ['civ-network', 'vnc'] with prefix 'civ' and separator '-'
    # becomes 'network-vnc'.
    middle = separator.join(resource_names_combination).replace(
        f'{self.resource_name_prefix}{separator}', '')

    # Budget left for the middle once prefix, suffix and two separators count.
    budget = max_chars - len(self.resource_name_prefix) - len(suffix) - (len(separator) * 2)

    return separator.join([self.resource_name_prefix, middle[:budget], suffix])

def get_random_numbers(self):
    """Random integer in [1, 99999], zero-padded to at least three digits."""
    return format(random.randrange(1, 10 ** 5), '03')

def add_tags(self, config_dict, resource):
    """Merge config-supplied tags into the resource's tag map (config wins on clashes)."""
    new_tags = config_dict['tags']
    if not new_tags:
        return

    key = self.resource_tags_key
    if key in resource:
        resource[key] = {**resource[key], **new_tags}
    else:
        resource[key] = new_tags
# Methods of GCloudConfigBuilder (cloud/opentofu/gcloud_config_builder.py).

def build_providers(self):
    """Register one google provider block per unique region in the resources file."""
    provider_list = self.providers_tf['provider'][self.cloud_providers[self.cloud_name]]
    for region in self.__get_all_regions_from_resources_file():
        provider_list.append(self.__new_gcloud_provider(self.project, region))

    return self.providers_tf

def __get_all_regions_from_resources_file(self):
    """Return the instance regions, de-duplicated while preserving order."""
    unique_regions = dict.fromkeys(inst['region'] for inst in self.resources_dict['instances'])
    return list(unique_regions)

def __new_gcloud_provider(self, project, region):
    # The zone is pinned to the region's '-c' zone.
    return {
        'project': project,
        'region': region,
        'zone': f'{region}-c',
    }

def build_resources(self):
    """Declare one shared VPC plus ssh firewall rule, then one compute instance per entry."""
    self.resources_tf['resource']['google_compute_network'] = {}
    self.resources_tf['resource']['google_compute_firewall'] = {}
    self.resources_tf['resource']['google_compute_instance'] = {}

    network_name = self.create_resource_name(['vpc'])

    self.__new_gcloud_network(network_name)
    self.__new_gcloud_firewall_rule(network_name)

    for instance in self.resources_dict['instances']:
        instance['google_compute_network'] = network_name
        self.__new_gcloud_instance(instance)

    return self.resources_tf

def __new_gcloud_firewall_rule(self, network_name):
    """Allow inbound tcp/22 from anywhere to instances tagged with ssh_enabled_tag."""
    rule_name = self.create_resource_name(['firewall-rule'])

    self.resources_tf['resource']['google_compute_firewall'][rule_name] = {
        'name': rule_name,
        'network': network_name,
        'target_tags': [self.ssh_enabled_tag],
        'source_ranges': ['0.0.0.0/0'],
        'allow': {
            'protocol': 'tcp',
            'ports': ['22'],
        },
        'depends_on': [
            'google_compute_network.{}'.format(network_name)
        ],
    }

def __new_gcloud_network(self, network_name):
    """Declare the auto-subnetted VPC shared by all instances."""
    self.resources_tf['resource']['google_compute_network'][network_name] = {
        'name': network_name,
        'auto_create_subnetworks': True,
    }
# Method of GCloudConfigBuilder (cloud/opentofu/gcloud_config_builder.py).

def __new_gcloud_instance(self, instance):
    """Declare a google_compute_instance for one entry of the resources file."""
    if not instance['instance_type']:
        instance['instance_type'] = 'c2d-highcpu-2'

    # Google instance names must match: '(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?)'
    sanitized_name = instance['name'].replace('.', '-').replace('_', '-')
    name = self.create_resource_name([sanitized_name])

    # The instance's region must already have a provider declared.
    regions_with_provider = [provider['region']
                             for provider in self.providers_tf['provider'][self.cloud_providers[self.cloud_name]]]
    if instance['region'] not in regions_with_provider:
        raise Exception('Cannot add an instance if region provider is not set up')

    boot_disk = {
        'initialize_params': {
            'image': instance['image']
        }
    }
    self.add_tags(self.config, boot_disk['initialize_params'])

    username = instance.get('username', self.default_ssh_user)

    new_instance = {
        'name': name,
        'machine_type': instance['instance_type'],
        'boot_disk': boot_disk,
        'zone': instance['zone'],
        'network_interface': {
            'network': instance['google_compute_network'],
            'access_config': {}
        },
        'metadata': {
            'ssh-keys': f'{username}:${{file("{self.ssh_key_path}")}}',
            'image': instance['image'],
            'username': username,
        },
        'tags': [self.ssh_enabled_tag],
        'depends_on': [
            'google_compute_network.{}'.format(instance['google_compute_network'])
        ],
    }

    self.add_tags(self.config, new_instance)
    self.resources_tf['resource']['google_compute_instance'][name] = new_instance
class OpenTofuConfigurator:
    """Builds the OpenTofu (Terraform-compatible) JSON configuration from a resources file."""

    supported_providers = ('aws', 'azure', 'gcloud')

    main_tf = {'terraform': {'required_version': '>= 0.14.9'}}
    providers_tf = None
    resources_tf = None

    def __init__(self, ssh_key_path, resources_path, config):
        self.resources_path = resources_path
        self.ssh_key_path = ssh_key_path
        self.config = config

        self.resources_dict = self._initialize_resources_dict()
        self.cloud_name = self.get_cloud_provider_from_resources()

    def _initialize_resources_dict(self):
        """Load the resources JSON file and return it as a dict."""
        with open(self.resources_path) as resources_file:
            return json.load(resources_file)

    def get_cloud_provider_from_resources(self):
        """Return the provider declared in the resources file, validating it is supported."""
        if 'provider' not in self.resources_dict:
            raise Exception(f'No cloud providers found in {self.resources_path}')

        cloud_provider = self.resources_dict['provider']
        if cloud_provider not in self.supported_providers:
            raise Exception(f'Unsupported cloud provider: {cloud_provider}')

        return cloud_provider

    def configure_from_resources_json(self):
        """Build the tf documents and write them out as *.tf.json files."""
        self.build_configuration()
        self.save_configuration_to_json()

    def build_configuration(self):
        """Fill main/providers/resources documents using the provider-specific builder."""
        builder = self.get_config_builder()

        self.main_tf['terraform']['required_providers'] = builder.cloud_provider_definition

        self.providers_tf = builder.build_providers()
        self.resources_tf = builder.build_resources()

    def get_config_builder(self):
        """Pick the config builder matching the declared cloud provider."""
        cloud_name = self.resources_dict['provider']

        if cloud_name == 'aws':
            # The 'aws-efs' tag switches to the EFS-enabled AWS builder.
            if self.config['tags'] and 'aws-efs' in self.config['tags'].keys():
                return AWSConfigBuilderEfs(self.resources_dict, self.ssh_key_path, self.config)
            return AWSConfigBuilder(self.resources_dict, self.ssh_key_path, self.config)
        elif cloud_name == 'azure':
            return AzureConfigBuilderV2(self.resources_dict, self.ssh_key_path, self.config)
        elif cloud_name == 'gcloud':
            return GCloudConfigBuilder(self.resources_dict, self.ssh_key_path, self.config)
        else:
            raise Exception(f'Could not find any suitable configurator for "{cloud_name}" cloud provider')
# Methods of OpenTofuConfigurator (cloud/opentofu/opentofu_configurator.py).

def save_configuration_to_json(self):
    """Persist the three generated documents as *.tf.json files in the CWD."""
    for content, file_name in ((self.main_tf, 'main.tf.json'),
                               (self.providers_tf, 'providers.tf.json'),
                               (self.resources_tf, 'resources.tf.json')):
        self.__dump_to_json(content, file_name)

def __dump_to_json(self, content, file):
    """Write one document to disk as pretty-printed JSON."""
    with open(file, 'w') as config_file:
        json.dump(content, config_file, indent=4)

def print_configuration(self):
    """Dump the generated configuration to stdout for debugging."""
    console_lib.print_divider('OpenTofu configuration')
    for document in (self.main_tf, self.providers_tf, self.resources_tf):
        pprint(document)

def remove_configuration(self):
    """Delete the generated *.tf.json files, ignoring the ones that do not exist."""
    for file in ('main.tf.json', 'resources.tf.json', 'providers.tf.json'):
        if os.path.exists(file):
            os.remove(file)

def get_aws_username_by_ami_name(self, ami_name):
    """Return the ssh username configured for the instance whose AMI matches ami_name."""
    for instance in self.resources_dict['instances']:
        if instance['ami'] == ami_name:
            return instance['username']

    raise Exception(f'ERROR: No instance with name "{ami_name}" was found')
# Methods of OpenTofuController (cloud/opentofu/opentofu_controller.py).

def create_infra(self):
    """Run `tofu init` + `tofu apply`, then block until every instance accepts ssh."""
    if os.system(f'tofu init {self.debug_sufix}'):
        raise Exception('tofu init command failed, check configuration')

    if os.system(f'tofu apply -auto-approve {self.debug_sufix}'):
        raise Exception('tofu apply command failed, check configuration')

    print('Waiting for the ssh server in the instance(s) to be ready...')
    self.wait_for_all_instances_ssh_up()

def wait_for_all_instances_ssh_up(self):
    """Spawn one watcher thread per instance and join them all (300 s ssh timeout each)."""
    timeout_seconds = 300

    workers = []
    for inst in self.get_instances().values():
        worker = Thread(target=ssh_lib.wait_for_host_ssh_up,
                        args=[inst['address'], timeout_seconds])
        worker.start()
        workers.append(worker)

    for worker in workers:
        worker.join()

def get_instances(self):
    """Return instance metadata keyed by tf resource address for the active cloud."""
    resources = self.get_opentofu_resources()

    if self.cloud_name == 'aws':
        return self.get_instances_aws(resources)
    if self.cloud_name == 'azure':
        return self.get_instances_azure(resources)
    if self.cloud_name == 'gcloud':
        return self.get_instances_gcloud(resources)

    raise Exception(f'Unsupported cloud provider: {self.cloud_name}')

def get_opentofu_resources(self):
    """Parse `tofu show --json` and return the root module's resource list."""
    stream = os.popen('tofu show --json')
    state = json.loads(stream.read())

    return state['values']['root_module']['resources']
# Methods of OpenTofuController (cloud/opentofu/opentofu_controller.py).

def get_instances_aws(self, resources):
    """
    Build a dict of instance metadata (keyed by tf resource address) for every
    aws_instance in the state, attaching the regional EFS DNS name when an
    aws_efs_file_system exists in the same region.
    """
    # First pass: map region -> EFS file system DNS name.
    regional_efs_file_systems = {}
    for resource in resources:
        if resource['type'] != 'aws_efs_file_system':
            continue

        efs_dns_name = resource['values']['dns_name']
        # EFS DNS names look like 'fs-<id>.efs.<region>.amazonaws.com'.
        # Fixed: the dot before 'amazon' is now escaped; the previous pattern
        # ('(.*).amazon') let any character match there.
        result = re.match(r'fs-.*\.efs\.(.*)\.amazon', efs_dns_name)
        if not result:
            raise Exception(f'Could not get EFS file system region in DNS name: {efs_dns_name}')

        regional_efs_file_systems[result.group(1)] = efs_dns_name

    instances_info = {}
    # 'address' key corresponds to the tf resource id
    for resource in resources:
        if resource['type'] != 'aws_instance':
            continue

        ami_name = resource['values']['ami']
        username = self.tf_configurator.get_aws_username_by_ami_name(ami_name)

        instance_data = {
            'cloud': 'aws',
            'name': resource['name'],
            'instance_id': resource['values']['id'],
            'public_ip': resource['values']['public_ip'],
            'public_dns': resource['values']['public_dns'],
            'private_ip': resource['values']['private_ip'],
            'availability_zone': resource['values']['availability_zone'],
            'ami': ami_name,
            'image': ami_name,
            'username': username,
        }

        self._set_instance_default_address(instance_data)

        # Strip the trailing AZ letter ('us-east-1a' -> 'us-east-1').
        instance_region = instance_data['availability_zone'][:-1]
        if instance_region in regional_efs_file_systems:
            instance_data['efs_file_system_dns_name'] = regional_efs_file_systems[instance_region]

        instances_info[resource['address']] = instance_data

    return instances_info

def get_instances_azure(self, resources):
    """Build the instance metadata dict for every azurerm_linux_virtual_machine."""
    instances_info = {}

    for res in resources:
        if res['type'] != 'azurerm_linux_virtual_machine':
            continue

        instance_data = {
            'cloud': 'azure',
            'name': res['name'],
            'instance_id': res['values']['id'],
            'public_ip': res['values']['public_ip_address'],
            'private_ip': res['values']['private_ip_address'],
            'public_dns': self._get_azure_vm_fqdn_from_resources_json(res['name'], resources),
            'location': res['values']['location'],
            'image': self._get_azure_image_data_from_resource(res, resources),
            'username': res['values']['admin_username'],
        }

        self._set_instance_default_address(instance_data)

        instances_info[res['address']] = instance_data

    return instances_info
# Methods of OpenTofuController (cloud/opentofu/opentofu_controller.py).

def get_instances_gcloud(self, resources):
    """Build the instance metadata dict for every google_compute_instance in the state."""
    instances_info = {}

    # 'address' key corresponds to the tf resource id
    for resource in resources:
        if resource['type'] != 'google_compute_instance':
            continue

        values = resource['values']
        nat_ip = values['network_interface'][0]['access_config'][0]['nat_ip']

        instances_info[resource['address']] = {
            'cloud': 'gcloud',
            'name': resource['name'],
            'instance_id': values['id'],
            'public_ip': nat_ip,
            'public_dns': nat_ip,  # TODO: Support also public dns as we do for the other Clouds
            'address': nat_ip,  # TODO: Support also private IP addresses as we do for AWS
            'zone': values['zone'],
            'image': values['metadata']['image'],
            'username': values['metadata']['username'],
        }

    return instances_info

def _set_instance_default_address(self, instance_data):
    """Pick the first truthy of public_dns / public_ip / private_ip as 'address'."""
    for key in ('public_dns', 'public_ip', 'private_ip'):
        if instance_data[key]:
            instance_data['address'] = instance_data[key]
            return

    raise Exception('Could not find any valid instance address.')

def _get_azure_vm_fqdn_from_resources_json(self, vm_name, resources_json):
    """Return the FQDN of the azurerm_public_ip whose domain label matches vm_name (None if absent)."""
    fqdns = (r['values']['fqdn'] for r in resources_json
             if r['type'] == 'azurerm_public_ip'
             and r['values']['domain_name_label'] == vm_name)
    return next(fqdns, None)
'source_image_id' in vm_resource['values'] and \ 188 | len(vm_resource['values']['source_image_id']) > 0: 189 | for r in all_resources: 190 | if r['type'] == 'azurerm_shared_image_version' and \ 191 | vm_resource['values']['source_image_id'] in r['values']['id']: 192 | return r['values']['blob_uri'] 193 | 194 | return vm_resource['values']['source_image_id'] 195 | 196 | def destroy_resource(self, resource_id): 197 | cmd_output = os.system(f'tofu destroy -target={resource_id}') 198 | if cmd_output: 199 | raise Exception('tofu destroy specific resource command failed') 200 | 201 | def destroy_infra(self): 202 | cmd_output = os.system(f'tofu destroy -auto-approve {self.debug_sufix}') 203 | if cmd_output: 204 | raise Exception('tofu destroy command failed') 205 | -------------------------------------------------------------------------------- /cloud/sample/resources_aws_gov_marketplace.json: -------------------------------------------------------------------------------- 1 | { 2 | "provider": "aws", 3 | "instances": [ 4 | { 5 | "ami": "ami-0b3f39f12c41f62c4", 6 | "region": "us-gov-west-1", 7 | "instance_type": "t3.small", 8 | "username": "ec2-user", 9 | "name": "RHEL-8.5.0_HVM-20220303-x86_64-0-Marketplace-GP2-6b2625ac-e9ca-4d9a-a34f-63dc75a4d497" 10 | }, 11 | { 12 | "ami": "ami-0403bf09861a27b43", 13 | "region": "us-gov-east-1", 14 | "instance_type": "t3.2xlarge", 15 | "username": "ec2-user", 16 | "name": "RHEL-8.5.0_HVM-20220303-x86_64-0-Marketplace-GP2-6b2625ac-e9ca-4d9a-a34f-63dc75a4d497" 17 | } 18 | ] 19 | } -------------------------------------------------------------------------------- /cloud/sample/resources_aws_marketplace.json: -------------------------------------------------------------------------------- 1 | { 2 | "provider": "aws", 3 | "instances": [ 4 | { 5 | "ami": "ami-03724163927725aeb", 6 | "region": "us-west-1", 7 | "instance_type": "", 8 | "username": "ec2-user", 9 | "name": "RHEL-9.1.0_HVM-20221101-x86_64-2-Access2-GP2" 10 | }, 11 | { 12 | "ami": 
"ami-03724163927725aeb", 13 | "region": "us-east-1", 14 | "instance_type": "", 15 | "username": "ec2-user", 16 | "name": "RHEL-9.1.0-Custom-Networking-Optional", 17 | "custom_vpc_name": "Your-Custom-VPC-Name", 18 | "custom_subnet_name": "Your-Custom-Subnet-Name", 19 | "custom_security_group_name": "Your-Custom-Security-Group-Name" 20 | } 21 | ] 22 | } -------------------------------------------------------------------------------- /cloud/sample/resources_azure.json: -------------------------------------------------------------------------------- 1 | { 2 | "subscription_id": "8d026bb1-2a65-454d-a88f-c896db94c4f8", 3 | "resource_group": "cloud-experience", 4 | "provider": "azure", 5 | "instances": [ 6 | { 7 | "image_uri": "/subscriptions/8d026bb1-2a65-454d-a88f-c896db94c4f8/resourceGroups/image-uploads/providers/Microsoft.Compute/images/ImageAzureTest", 8 | "location": "West Europe", 9 | "name": "sample-name-1" 10 | }, 11 | { 12 | "image_definition": { 13 | "publisher": "Canonical", 14 | "offer": "UbuntuServer", 15 | "sku": "16.04-LTS", 16 | "version": "latest" 17 | }, 18 | "location": "West Europe", 19 | "name": "sample-name-2" 20 | }, 21 | { 22 | "image_definition": { 23 | "publisher": "Canonical", 24 | "offer": "UbuntuServer", 25 | "sku": "16.04-LTS", 26 | "version": "latest" 27 | }, 28 | "location": "East US", 29 | "name": "sample-name-3" 30 | } 31 | ] 32 | } 33 | -------------------------------------------------------------------------------- /cloud/sample/resources_azure_marketplace.json: -------------------------------------------------------------------------------- 1 | { 2 | "subscription_id": "1be3e85f-1b24-400e-87ca-daec1140cc88", 3 | "resource_group": "cloud-experience", 4 | "provider": "azure", 5 | "instances": [ 6 | { 7 | "image_definition": { 8 | "offer": "rh-rhel", 9 | "publisher": "RedHat", 10 | "sku": "rh-rhel9", 11 | "version": "9.0.2022061000" 12 | }, 13 | "plan": { 14 | "name": "rh-rhel9", 15 | "publisher": "redhat", 16 | "product": "rh-rhel" 17 
| }, 18 | "location": "switzerlandnorth" 19 | }, 20 | { 21 | "image_definition": { 22 | "offer": "rh-rhel", 23 | "publisher": "RedHat", 24 | "sku": "rh-rhel9-gen1", 25 | "version": "9.0.2022061000" 26 | }, 27 | "plan": { 28 | "name": "rh-rhel9-gen1", 29 | "publisher": "redhat", 30 | "product": "rh-rhel" 31 | }, 32 | "location": "uaenorth" 33 | } 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /cloud/sample/resources_azure_vhd.json: -------------------------------------------------------------------------------- 1 | { 2 | "subscription_id": "a5192c85-2bff-4433-ac37-69aad5fcc86a", 3 | "resource_group": "cloud-experience", 4 | "provider": "azure", 5 | "instances": [ 6 | { 7 | "vhd_uri": "https://rhimages.blob.core.windows.net/rhel/7.9/rhel-azure-7.9-20220803.sp.1.x86_64.vhd", 8 | "arch": "x86_64", 9 | "location": "East US", 10 | "name": "sample-rhel7.9.14-image-from-vhd" 11 | }, 12 | { 13 | "vhd_uri": "https://rhimages.blob.core.windows.net/rhel/8.6/rhel-azure-8.6-20220531.sp.2.x86_64.vhd", 14 | "arch": "x86_64", 15 | "location": "East US", 16 | "name": "sample-rhel8.6-image-from-vhd" 17 | }, 18 | { 19 | "vhd_uri": "https://rhimages.blob.core.windows.net/rhel/9.0/rhel-azure-9.0-20220531.sp.2.x86_64.vhd", 20 | "arch": "x86_64", 21 | "location": "East US", 22 | "name": "sample-rhel9.0-image-from-vhd" 23 | }, 24 | { 25 | "vhd_uri": "https://rhimages.blob.core.windows.net/rhel/9.1/rhel-azure-9.1-20230117.sp.1.aarch64.vhd", 26 | "arch": "arm64", 27 | "location": "East US", 28 | "name": "sample-rhel9.1-arm64-image-from-vhd" 29 | } 30 | ] 31 | } 32 | -------------------------------------------------------------------------------- /cloud/sample/resources_gcloud.json: -------------------------------------------------------------------------------- 1 | { 2 | "provider": "gcloud", 3 | "project": "redhat-marketplace-dev", 4 | "instances": [ 5 | { 6 | "image": "rhel-9-v20220524", 7 | "region": "us-east1", 8 | "zone": 
"us-east1-c", 9 | "instance_type": "", 10 | "username": "google", 11 | "name": "rhel-9.0-x86_64" 12 | } 13 | ] 14 | } -------------------------------------------------------------------------------- /data/azure/05_logging.cfg: -------------------------------------------------------------------------------- 1 | ## This yaml formated config file handles setting 2 | ## logger information. The values that are necessary to be set 3 | ## are seen at the bottom. The top '_log' are only used to remove 4 | ## redundency in a syslog and fallback-to-file case. 5 | ## 6 | ## The 'log_cfgs' entry defines a list of logger configs 7 | ## Each entry in the list is tried, and the first one that 8 | ## works is used. If a log_cfg list entry is an array, it will 9 | ## be joined with '\n'. 10 | _log: 11 | - &log_base | 12 | [loggers] 13 | keys=root,cloudinit 14 | 15 | [handlers] 16 | keys=consoleHandler,cloudLogHandler 17 | 18 | [formatters] 19 | keys=simpleFormatter,arg0Formatter 20 | 21 | [logger_root] 22 | level=DEBUG 23 | handlers=consoleHandler,cloudLogHandler 24 | 25 | [logger_cloudinit] 26 | level=DEBUG 27 | qualname=cloudinit 28 | handlers= 29 | propagate=1 30 | 31 | [handler_consoleHandler] 32 | class=StreamHandler 33 | level=WARNING 34 | formatter=arg0Formatter 35 | args=(sys.stderr,) 36 | 37 | [formatter_arg0Formatter] 38 | format=%(asctime)s - %(filename)s[%(levelname)s]: %(message)s 39 | 40 | [formatter_simpleFormatter] 41 | format=[CLOUDINIT] %(filename)s[%(levelname)s]: %(message)s 42 | - &log_file | 43 | [handler_cloudLogHandler] 44 | class=FileHandler 45 | level=DEBUG 46 | formatter=arg0Formatter 47 | args=('/var/log/cloud-init.log', 'a', 'UTF-8') 48 | - &log_syslog | 49 | [handler_cloudLogHandler] 50 | class=handlers.SysLogHandler 51 | level=DEBUG 52 | formatter=simpleFormatter 53 | args=("/dev/log", handlers.SysLogHandler.LOG_USER) 54 | 55 | log_cfgs: 56 | # Array entries in this list will be joined into a string 57 | # that defines the configuration. 
58 | # 59 | # If you want logs to go to syslog, uncomment the following line. 60 | # - [ *log_base, *log_syslog ] 61 | # 62 | # The default behavior is to just log to a file. 63 | # This mechanism that does not depend on a system service to operate. 64 | - [ *log_base, *log_file ] 65 | # A file path can also be used. 66 | # - /etc/log.conf 67 | 68 | # This tells cloud-init to redirect its stdout and stderr to 69 | # 'tee -a /var/log/cloud-init-output.log' so the user can see output 70 | # there without needing to look on the console. 71 | output: {all: '| tee -a /var/log/cloud-init-output.log'} -------------------------------------------------------------------------------- /data/azure/06_logging_override.cfg: -------------------------------------------------------------------------------- 1 | output: 2 | all: '| tee -a /var/log/cloud-init-output.log' -------------------------------------------------------------------------------- /data/azure/66-azure-storage.rules: -------------------------------------------------------------------------------- 1 | ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_VENDOR}=="Msft", ENV{ID_MODEL}=="Virtual_Disk", GOTO="azure_disk" 2 | GOTO="azure_end" 3 | 4 | LABEL="azure_disk" 5 | # Root has a GUID of 0000 as the second value 6 | # The resource/resource has GUID of 0001 as the second value 7 | ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" 8 | ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" 9 | ATTRS{device_id}=="?00000001-0001-*", ENV{fabric_name}="BEK", GOTO="azure_names" 10 | # Wellknown SCSI controllers 11 | ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" 12 | ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="azure_datadisk" 13 | ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", 
GOTO="azure_datadisk" 14 | ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="azure_datadisk" 15 | GOTO="azure_end" 16 | 17 | # Retrieve LUN number for datadisks 18 | LABEL="azure_datadisk" 19 | ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" 20 | PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" 21 | GOTO="azure_end" 22 | 23 | # Create the symlinks 24 | LABEL="azure_names" 25 | ENV{DEVTYPE}=="disk", SYMLINK+="disk/azure/$env{fabric_name}" 26 | ENV{DEVTYPE}=="partition", SYMLINK+="disk/azure/$env{fabric_name}-part%n" 27 | 28 | LABEL="azure_end" -------------------------------------------------------------------------------- /data/azure/68-azure-sriov-nm-unmanaged.rules: -------------------------------------------------------------------------------- 1 | # Accelerated Networking on Azure exposes a new SRIOV interface to the VM. 2 | # This interface is transparently bonded to the synthetic interface, 3 | # so NetworkManager should just ignore any SRIOV interfaces. 
4 | SUBSYSTEM=="net", DRIVERS=="hv_pci", ACTION=="add", ENV{NM_UNMANAGED}="1" -------------------------------------------------------------------------------- /data/azure/91-azure_datasource.cfg: -------------------------------------------------------------------------------- 1 | datasource: 2 | Azure: 3 | apply_network_config: false 4 | datasource_list: 5 | - Azure -------------------------------------------------------------------------------- /data/azure/99-azure-product-uuid.rules: -------------------------------------------------------------------------------- 1 | SUBSYSTEM!="dmi", GOTO="product_uuid-exit" 2 | ATTR{sys_vendor}!="Microsoft Corporation", GOTO="product_uuid-exit" 3 | ATTR{product_name}!="Virtual Machine", GOTO="product_uuid-exit" 4 | TEST!="/sys/devices/virtual/dmi/id/product_uuid", GOTO="product_uuid-exit" 5 | 6 | RUN+="/bin/chmod 0444 /sys/devices/virtual/dmi/id/product_uuid" 7 | 8 | LABEL="product_uuid-exit" 9 | -------------------------------------------------------------------------------- /data/azure/authconfig: -------------------------------------------------------------------------------- 1 | CACHECREDENTIALS=yes 2 | FAILLOCKARGS="deny=4 unlock_time=1200" 3 | FORCELEGACY=no 4 | FORCESMARTCARD=no 5 | IPADOMAINJOINED=no 6 | IPAV2NONTP=no 7 | PASSWDALGORITHM=sha512 8 | USEDB=no 9 | USEECRYPTFS=no 10 | USEFAILLOCK=no 11 | USEFPRINTD=no 12 | USEHESIOD=no 13 | USEIPAV2=no 14 | USEKERBEROS=no 15 | USELDAP=no 16 | USELDAPAUTH=no 17 | USELOCAUTHORIZE=yes 18 | USEMKHOMEDIR=no 19 | USENIS=no 20 | USEPAMACCESS=no 21 | USEPASSWDQC=no 22 | USEPWQUALITY=yes 23 | USESHADOW=yes 24 | USESMARTCARD=no 25 | USESSSD=yes 26 | USESSSDAUTH=no 27 | USESYSNETAUTH=no 28 | USEWINBIND=no 29 | USEWINBINDAUTH=no 30 | WINBINDKRB5=no -------------------------------------------------------------------------------- /data/azure/grub_rhel7: -------------------------------------------------------------------------------- 1 | GRUB_TIMEOUT=10 2 | GRUB_CMDLINE_LINUX="ro 
crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300 scsi_mod.use_blk_mq=y" 3 | GRUB_DISABLE_SUBMENU=true 4 | GRUB_DISABLE_RECOVERY=true 5 | GRUB_TIMEOUT_STYLE=countdown 6 | GRUB_DEFAULT=saved 7 | GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)" 8 | GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" 9 | GRUB_TERMINAL_INPUT="serial console" 10 | GRUB_TERMINAL_OUTPUT="serial console" -------------------------------------------------------------------------------- /data/azure/grub_rhel8: -------------------------------------------------------------------------------- 1 | GRUB_CMDLINE_LINUX="ro loglevel=3 crashkernel=auto console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300" 2 | GRUB_TIMEOUT=10 3 | GRUB_ENABLE_BLSCFG=true 4 | GRUB_DISABLE_RECOVERY=true 5 | GRUB_DISABLE_SUBMENU=true 6 | GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)" 7 | GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" 8 | GRUB_TERMINAL="serial console" 9 | GRUB_TIMEOUT_STYLE=countdown 10 | GRUB_DEFAULT=saved 11 | -------------------------------------------------------------------------------- /data/azure/grub_rhel9: -------------------------------------------------------------------------------- 1 | GRUB_CMDLINE_LINUX="ro console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300" 2 | GRUB_TIMEOUT=10 3 | GRUB_ENABLE_BLSCFG=true 4 | GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" 5 | GRUB_TERMINAL_INPUT="serial console" 6 | GRUB_TERMINAL_OUTPUT="serial console" 7 | GRUB_DEFAULT=saved -------------------------------------------------------------------------------- /data/azure/grub_rhel9.3+: -------------------------------------------------------------------------------- 1 | GRUB_CMDLINE_LINUX="ro loglevel=3 console=tty1 console=ttyS0 earlyprintk=ttyS0 rootdelay=300" 2 | GRUB_TIMEOUT=10 3 | GRUB_ENABLE_BLSCFG=true 4 | 
GRUB_DISABLE_RECOVERY=true 5 | GRUB_DISABLE_SUBMENU=true 6 | GRUB_DISTRIBUTOR="$(sed 's, release .*$,,g' /etc/system-release)" 7 | GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1" 8 | GRUB_TERMINAL="serial console" 9 | GRUB_TIMEOUT_STYLE=countdown 10 | GRUB_DEFAULT=saved 11 | -------------------------------------------------------------------------------- /data/generic/dnf.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | gpgcheck=1 3 | installonly_limit=3 4 | clean_requirements_on_remove=True 5 | best=True 6 | skip_if_unavailable=False -------------------------------------------------------------------------------- /data/generic/fingerprint-auth_rhel10: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth sufficient pam_fprintd.so 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 500 quiet 11 | account required pam_permit.so 12 | 13 | password required pam_deny.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/fingerprint-auth_rhel7: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authconfig is run. 
4 | auth required pam_env.so 5 | auth sufficient pam_fprintd.so 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 1000 quiet 11 | account required pam_permit.so 12 | 13 | password required pam_deny.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so 20 | -------------------------------------------------------------------------------- /data/generic/fingerprint-auth_rhel8: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth sufficient pam_fprintd.so 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 500 quiet 11 | account required pam_permit.so 12 | 13 | password required pam_deny.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/fingerprint-auth_rhel9: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 
4 | auth required pam_env.so 5 | auth sufficient pam_fprintd.so 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 500 quiet 11 | account required pam_permit.so 12 | 13 | password required pam_deny.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/langpacks.conf: -------------------------------------------------------------------------------- 1 | [main] 2 | enabled=1 3 | langpack_locales = en_US.UTF-8 -------------------------------------------------------------------------------- /data/generic/password-auth_rhel10: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth sufficient pam_unix.so try_first_pass nullok 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | 10 | password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type= 11 | password sufficient pam_unix.so try_first_pass use_authtok nullok sha512 shadow 12 | password required pam_deny.so 13 | 14 | session optional pam_keyinit.so revoke 15 | session required pam_limits.so 16 | -session optional pam_systemd.so 17 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 18 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/password-auth_rhel7: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 
3 | # User changes will be destroyed the next time authconfig is run. 4 | auth required pam_env.so 5 | auth required pam_faildelay.so delay=2000000 6 | auth sufficient pam_unix.so nullok try_first_pass 7 | auth requisite pam_succeed_if.so uid >= 1000 quiet_success 8 | auth required pam_deny.so 9 | 10 | account required pam_unix.so 11 | account sufficient pam_localuser.so 12 | account sufficient pam_succeed_if.so uid < 1000 quiet 13 | account required pam_permit.so 14 | 15 | password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type= 16 | password sufficient pam_unix.so sha512 shadow nullok try_first_pass use_authtok 17 | 18 | 19 | password required pam_deny.so 20 | 21 | session optional pam_keyinit.so revoke 22 | session required pam_limits.so 23 | -session optional pam_systemd.so 24 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 25 | session required pam_unix.so 26 | -------------------------------------------------------------------------------- /data/generic/password-auth_rhel8: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 
4 | auth required pam_env.so 5 | auth sufficient pam_unix.so try_first_pass nullok 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | 10 | password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type= 11 | password sufficient pam_unix.so try_first_pass use_authtok nullok sha512 shadow 12 | password required pam_deny.so 13 | 14 | session optional pam_keyinit.so revoke 15 | session required pam_limits.so 16 | -session optional pam_systemd.so 17 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 18 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/password-auth_rhel9: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth sufficient pam_unix.so try_first_pass nullok 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | 10 | password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type= 11 | password sufficient pam_unix.so try_first_pass use_authtok nullok sha512 shadow 12 | password required pam_deny.so 13 | 14 | session optional pam_keyinit.so revoke 15 | session required pam_limits.so 16 | -session optional pam_systemd.so 17 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 18 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/postlogin_rhel10: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 
4 | 5 | session optional pam_umask.so silent 6 | session [success=1 default=ignore] pam_succeed_if.so service !~ gdm* service !~ su* quiet 7 | session [default=1] pam_lastlog.so nowtmp showfailed 8 | session optional pam_lastlog.so silent noupdate showfailed -------------------------------------------------------------------------------- /data/generic/postlogin_rhel7: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authconfig is run. 4 | 5 | 6 | session [success=1 default=ignore] pam_succeed_if.so service !~ gdm* service !~ su* quiet 7 | session [default=1] pam_lastlog.so nowtmp showfailed 8 | session optional pam_lastlog.so silent noupdate showfailed 9 | -------------------------------------------------------------------------------- /data/generic/postlogin_rhel8: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | 5 | session optional pam_umask.so silent 6 | session [success=1 default=ignore] pam_succeed_if.so service !~ gdm* service !~ su* quiet 7 | session [default=1] pam_lastlog.so nowtmp showfailed 8 | session optional pam_lastlog.so silent noupdate showfailed -------------------------------------------------------------------------------- /data/generic/postlogin_rhel9: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 
4 | 5 | session optional pam_umask.so silent 6 | session [success=1 default=ignore] pam_succeed_if.so service !~ gdm* service !~ su* quiet 7 | session [default=1] pam_lastlog.so nowtmp showfailed 8 | session optional pam_lastlog.so silent noupdate showfailed -------------------------------------------------------------------------------- /data/generic/smartcard-auth_rhel10: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth [success=done ignore=ignore default=die] pam_pkcs11.so wait_for_card 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 500 quiet 11 | account required pam_permit.so 12 | 13 | password optional pam_pkcs11.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/smartcard-auth_rhel7: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authconfig is run. 
4 | auth required pam_env.so 5 | auth [success=done ignore=ignore default=die] pam_pkcs11.so nodebug wait_for_card 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 1000 quiet 11 | account required pam_permit.so 12 | 13 | password required pam_pkcs11.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so 20 | -------------------------------------------------------------------------------- /data/generic/smartcard-auth_rhel8: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth [success=done ignore=ignore default=die] pam_pkcs11.so wait_for_card 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 500 quiet 11 | account required pam_permit.so 12 | 13 | password optional pam_pkcs11.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/smartcard-auth_rhel8.10: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 
4 | auth sufficient pam_sss.so allow_missing_name 5 | -------------------------------------------------------------------------------- /data/generic/smartcard-auth_rhel9: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth [success=done ignore=ignore default=die] pam_pkcs11.so wait_for_card 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | account sufficient pam_localuser.so 10 | account sufficient pam_succeed_if.so uid < 500 quiet 11 | account required pam_permit.so 12 | 13 | password optional pam_pkcs11.so 14 | 15 | session optional pam_keyinit.so revoke 16 | session required pam_limits.so 17 | -session optional pam_systemd.so 18 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 19 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/system-auth_rhel10: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 
4 | auth required pam_env.so 5 | auth sufficient pam_unix.so try_first_pass nullok 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | 10 | password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type= 11 | password sufficient pam_unix.so try_first_pass use_authtok nullok sha512 shadow 12 | password required pam_deny.so 13 | 14 | session optional pam_keyinit.so revoke 15 | session required pam_limits.so 16 | -session optional pam_systemd.so 17 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 18 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/system-auth_rhel7: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authconfig is run. 4 | auth required pam_env.so 5 | auth required pam_faildelay.so delay=2000000 6 | auth sufficient pam_unix.so nullok try_first_pass 7 | auth requisite pam_succeed_if.so uid >= 1000 quiet_success 8 | auth required pam_deny.so 9 | 10 | account required pam_unix.so 11 | account sufficient pam_localuser.so 12 | account sufficient pam_succeed_if.so uid < 1000 quiet 13 | account required pam_permit.so 14 | 15 | password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type= 16 | password sufficient pam_unix.so sha512 shadow nullok try_first_pass use_authtok 17 | password required pam_deny.so 18 | 19 | session optional pam_keyinit.so revoke 20 | session required pam_limits.so 21 | -session optional pam_systemd.so 22 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 23 | session required pam_unix.so 24 | -------------------------------------------------------------------------------- /data/generic/system-auth_rhel8: -------------------------------------------------------------------------------- 1 | 
#%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 4 | auth required pam_env.so 5 | auth sufficient pam_unix.so try_first_pass nullok 6 | auth required pam_deny.so 7 | 8 | account required pam_unix.so 9 | 10 | password requisite pam_pwquality.so try_first_pass local_users_only retry=3 authtok_type= 11 | password sufficient pam_unix.so try_first_pass use_authtok nullok sha512 shadow 12 | password required pam_deny.so 13 | 14 | session optional pam_keyinit.so revoke 15 | session required pam_limits.so 16 | -session optional pam_systemd.so 17 | session [success=1 default=ignore] pam_succeed_if.so service in crond quiet use_uid 18 | session required pam_unix.so -------------------------------------------------------------------------------- /data/generic/system-auth_rhel9: -------------------------------------------------------------------------------- 1 | #%PAM-1.0 2 | # This file is auto-generated. 3 | # User changes will be destroyed the next time authselect is run. 
def get_aws_instance_identity_from_web(host):
    """Fetch and parse the EC2 instance identity document from the metadata service.

    :param host: testinfra host object used to run the curl command remotely.
    :return: dict with the parsed instance identity document fields.
    """
    instance_document_url = 'http://169.254.169.254/latest/dynamic/instance-identity/document'
    raw_document = host.check_output(f'curl -s {instance_document_url}')
    return json.loads(raw_document)


def is_rhel_aws_stratosphere(host):
    """Tell whether the running instance is a RHEL AWS "stratosphere" image.

    The check is a null 'billingProducts' field in the instance identity
    document fetched from the metadata service.

    :param host: testinfra host object.
    :return: True when 'billingProducts' is None, False otherwise.
    """
    identity_document = get_aws_instance_identity_from_web(host)
    return identity_document['billingProducts'] is None
def validate_config(self):
    """Load the YAML config file and assert the mandatory keys are present.

    Exits the process with status 1 when the file is not parseable YAML.
    :raises AssertionError: when 'resources_file' or 'output_file' is missing.
    """
    with open(self.config_path) as config_file:
        try:
            config = yaml.safe_load(config_file)
        except Exception as e:
            print('ERROR: Failed to load the config yaml, please check the syntax.')
            print(e)
            exit(1)

    assert 'resources_file' in config.keys(), 'ERROR: Please provide a resources file'
    assert 'output_file' in config.keys(), 'ERROR: Please provide an output path'

def write_config(self, config_to_write):
    """Dump the given config dict to the config file path as YAML."""
    with open(self.config_path, 'w+') as config_file:
        yaml.dump(config_to_write, config_file)

def update_config(self):
    """Merge defaults, any existing config file and command-line overrides, then persist."""
    config = self.get_default_config()

    if os.path.exists(self.config_path):
        config.update(self.get_config())

    self.__override_config_from_cmd_line_arg(config)

    self.write_config(config)

def __override_config_from_cmd_line_arg(self, config):
    """Apply non-None command-line argument values on top of the config dict."""
    if len(self.command_line_args) == 1 and \
            self.config_file_arg_name in self.command_line_args:
        return

    # BUGFIX: pop with a default so a missing 'config_file' key does not raise
    # KeyError (the args_dict given to __init__ is not guaranteed to carry it).
    self.command_line_args.pop(self.config_file_arg_name, None)

    for arg_name, arg_value in self.command_line_args.items():
        # Unknown keys are added as-is so new CLI flags survive the merge.
        if arg_name not in config:
            config[arg_name] = arg_value

        if arg_value == config[arg_name] or arg_value is None:
            continue

        print(f'Overriding "{arg_name}" config item with command-line argument value...')

        if arg_name == 'tags':
            config[arg_name] = self.get_tags_dict_from_command_line_arg_value(arg_value)
            continue

        config[arg_name] = arg_value

def get_tags_dict_from_command_line_arg_value(self, tags_arg_value):
    """Parse a 'key1: value1, key2: value2' string into a dict.

    BUGFIX/generalization: each pair is split on the first ':' only, so values
    that themselves contain colons (e.g. URLs) are preserved instead of being
    silently truncated at the second colon.

    :param tags_arg_value: comma-separated 'key: value' pairs from the CLI.
    :return: dict mapping stripped keys to stripped values.
    """
    tags_dict = {}

    for pair in tags_arg_value.split(','):
        tag_data = pair.split(':', 1)
        tags_dict[tag_data[0].strip()] = tag_data[1].strip()

    return tags_dict

def get_config(self):
    """Read and return the current config file as a dict."""
    with open(self.config_path) as config_file:
        config = yaml.safe_load(config_file)

    return config
def get_default_config(self):
    """Return the baseline CIV configuration with every supported key."""
    return {
        'resources_file': None,
        'output_file': None,
        'environment': 'local',
        'tags': None,
        'debug': False,
        'include_markers': None,
        'parallel': False,
        'stop_cleanup': None,
        'test_filter': None,
        'test_suites': None,
        'instances_json': '/tmp/instances.json',
        'ssh_identity_file': '/tmp/ssh_key',
        'ssh_pub_key_file': '/tmp/ssh_key.pub',
        'ssh_config_file': '/tmp/ssh_config',
    }

def export_config_as_env_vars(self):
    """Expose every config item as an upper-cased CIV_<KEY> environment variable."""
    config = self.get_config()

    for key in config:
        env_var_name = f'CIV_{key}'.upper()
        os.environ[env_var_name] = self.__get_config_value_as_string(config, key)

def __get_config_value_as_string(self, config, config_key):
    """Render one config value as a flat string.

    Dicts become 'k=v' pairs and lists become items, both comma-joined;
    anything else is passed through str().
    :raises ValueError: when the key is not present in the config.
    """
    if config_key not in config:
        raise ValueError(f'Invalid config key. The key "{config_key}" does not exist in current CIV config.')

    value = config[config_key]
    if type(value) is dict:
        value = ','.join(f'{k}={v}' for k, v in value.items())
    elif type(value) is list:
        value = ','.join(value)

    return str(value)


def color_print(text):
    """Print the given text wrapped in the ANSI bright-magenta escape codes."""
    print('\033[95m' + text + '\033[0m')


def print_divider(text, upper=True, lower=True, center_text=True, length=75):
    """Print text framed by optional horizontal rules, optionally centered.

    :param text: the text to print between the rules.
    :param upper: print a rule above the text.
    :param lower: print a rule below the text.
    :param center_text: pad the text so it sits in the middle of the rule width.
    :param length: width of the horizontal rules, in characters.
    """
    padding = int((length / 2) - (len(text) / 2)) if center_text else 0

    lines = []
    if upper:
        lines.append('-' * length)

    lines.append(' ' * padding + text)

    if lower:
        lines.append('-' * length)

    for line in lines:
        color_print(line)
def generate_ssh_key_pair(identity_file):
    """
    Generates an SSH key pair and writes them to the specified identity file.

    :param identity_file: The path where the private key will be written.
    :return: A tuple containing the paths of the private and public keys.
    """
    # Generate private key
    # 65537 is the conventional RSA public exponent; 2048-bit key size.
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
    )

    # Write private key to file in PEM format
    with open(identity_file, 'wb') as f:
        f.write(private_key.private_bytes(
            encoding=serialization.Encoding.PEM,
            format=serialization.PrivateFormat.TraditionalOpenSSL,
            encryption_algorithm=serialization.NoEncryption()
        ))

    # Private keys must be owner read/write only or ssh refuses to use them.
    os.chmod(identity_file, 0o600)

    # Generate public key
    public_key = private_key.public_key()

    # Write public key to file in OpenSSH format
    with open(identity_file + '.pub', 'wb') as f:
        f.write(public_key.public_bytes(
            encoding=serialization.Encoding.OpenSSH,
            format=serialization.PublicFormat.OpenSSH
        ))

    print(f"Generated SSH keys: {identity_file} and {identity_file}.pub")
    return identity_file, identity_file + '.pub'


def generate_instances_ssh_config(ssh_key_path, ssh_config_file, instances):
    """
    Write an ssh_config file with one Host entry per deployed instance.

    :param ssh_key_path: Private key path used as IdentityFile for every host.
    :param ssh_config_file: Destination path of the generated ssh config.
    :param instances: Dict of instance dicts; each value must provide
                      'address' and 'username' keys.
    :return: None
    """
    # Start from a clean file so stale entries from a previous run are dropped.
    if os.path.exists(ssh_config_file):
        os.remove(ssh_config_file)

    conf = sshconf.empty_ssh_config_file()
    for inst in instances.values():
        # Host key checking is disabled because instances are freshly created
        # and their host keys are not known yet.
        conf.add(
            inst["address"],
            Hostname=inst["address"],
            User=inst["username"],
            Port=22,
            IdentityFile=ssh_key_path,
            StrictHostKeyChecking="no",
            UserKnownHostsFile="/dev/null",
            LogLevel="ERROR",
            ConnectTimeout=30,
            ConnectionAttempts=5,
        )

    conf.write(ssh_config_file)


def wait_for_host_ssh_up(host_address, timeout_seconds):
    """
    Check if a given host is ready for SSH connection within a given number of seconds
    :param host_address: Host public DNS or IP address.
    :param timeout_seconds: The maximum number of seconds to check for SSH availability.
    :return: None
    """
    start_time = time.time()
    while time.time() < start_time + timeout_seconds:
        tick = time.time()
        # os.system returns a wait status; shifting by 8 extracts the exit code
        # (POSIX). Exit code 0 means ssh-keyscan reached the SSH daemon.
        if (os.system(f'ssh-keyscan "{host_address}" > /dev/null 2>&1') >> 8) == 0:
            print(f"{host_address} SSH is up! ({time.time() - start_time} seconds)")
            return
        else:
            # Sleep out the remainder of a ~1-second poll interval (never negative).
            time_diff_seconds = int(time.time() - tick)
            time.sleep(max(0, (1 - time_diff_seconds)))

    print(
        f"Timeout while waiting for {host_address} to be SSH-ready ({timeout_seconds} seconds)."
    )
    print("AWS: Check if this account has the appropiate inbound rules for this region")
    # NOTE(review): exiting the whole process from a library helper means
    # callers cannot recover from this timeout — confirm this is intended.
    exit(1)


def copy_file_to_host(host, local_file_path, destination_path):
    """
    Copies a local file to the remote host, in a given destination path.
    This test only works with testinfra's Paramiko backend.
    :param host: The host object from the pytest test case.
    :param local_file_path: The path of the local file that will be copied.
    :param destination_path: The destination path where the local file will be placed in the remote host.
    :return: None
    """
    # Reuses the already-established Paramiko SSH connection for the transfer.
    sftp = host.backend.client.open_sftp()

    sftp.put(local_file_path, destination_path)
    sftp.close()


def add_ssh_keys_to_instances(instances, ssh_config_file):
    """
    Append the team's public SSH keys to authorized_keys on every instance.

    Each instance is handled in its own thread; the call blocks until all
    threads have finished.

    :param instances: Dict of instance dicts with 'address' and 'username' keys.
    :param ssh_config_file: ssh config file used to reach the instances.
    :return: None
    """
    team_ssh_keys = __get_team_ssh_keys_by_path()

    print(f'Team public SSH key(s) to copy: {", ".join(list(team_ssh_keys.keys()))}')

    threads = []
    for inst in instances.values():
        t = Thread(
            target=__copy_team_ssh_keys_to_instance,
            args=[inst, ssh_config_file, team_ssh_keys],
        )
        t.start()
        threads.append(t)

    # Wait for every copy thread before returning.
    [t.join() for t in threads]


def __get_team_ssh_keys_by_path():
    """
    Read every file in schutzbot/team_ssh_keys and return {file_path: key_content}.
    """
    keys_dir = "schutzbot/team_ssh_keys"

    keys = {}
    for p in os.listdir(keys_dir):
        key_file_path = os.path.join(keys_dir, p)
        with open(key_file_path, "r") as f:
            keys[key_file_path] = f.read()

    return keys


def __copy_team_ssh_keys_to_instance(instance, ssh_config_file, team_ssh_keys):
    """
    Append the given public keys to ~/.ssh/authorized_keys on one instance.

    Failures are reported as a warning instead of raising, so one unreachable
    instance does not abort the whole copy (best-effort by design).
    """
    auth_keys = "~/.ssh/authorized_keys"
    instance_address = instance["address"]
    username = instance["username"]

    # One 'echo >> authorized_keys' per key, chained into a single remote command.
    composed_echo_command = ";".join(
        [f'echo "{k}" >> {auth_keys}' for k in team_ssh_keys.values()]
    )

    ssh_command = (
        f'ssh -F "{ssh_config_file}" '
        f'{username}@{instance_address} "{composed_echo_command}" > /dev/null 2>&1'
    )

    if (os.system(ssh_command) >> 8) == 0:
        print(f"[{instance_address}] Public SSH key(s) copied successfully!")
    else:
        print(f"[{instance_address}] WARNING: Could not copy public SSH key(s)")
def is_rhel_sap(host):
    """Return whether the host's repositories identify it as a RHEL SAP image."""
    return __test_keyword_in_repositories(host, 'sap-')


def is_rhel_high_availability(host):
    """Return a truthy regex match when a non-SAP RHUI High Availability
    package is installed, None otherwise."""
    installed_rhui = str(host.run('rpm -qa | grep rhui').stdout)
    return re.search('rhui-(?!sap).*ha', installed_rhui)


def __test_keyword_in_repositories(host, keyword):
    """Check (as root) whether the yum repository list mentions the keyword.

    Returns None implicitly when yum is not available on the host.
    """
    with host.sudo():
        if host.exists('yum'):
            return keyword in host.run('yum repolist 2>&1').stdout


def run_local_script_in_host(host, script_relative_path):
    """
    Runs a local script in the given remote host.
    To achieve this, the script is first copied to the remote host.
    :param host: The host object from the pytest test case.
    :param script_relative_path: Relative file path of the script to run (from project's root dir)
    :return: testinfra.backend.base.CommandResult containing: command, rc, stdout, stderr
    """
    remote_path = f'/tmp/{path.basename(script_relative_path)}'

    ssh_lib.copy_file_to_host(host, script_relative_path, remote_path)

    with host.sudo():
        # Make the copied script executable before running it as root.
        host.run_test(f'chmod +x "{remote_path}"')
        return host.run(remote_path)
def reboot_host(host, max_timeout=120):
    """
    Reboots the given testinfra's host and uses a timeout to check when the host shh is up again.
    :param host: The testinfra host object, from pytest test case
    :param max_timeout: timeout in seconds, as a limit for the reboot to finish (until the ssh connection is ready)
    :return: A new testinfra host object, which will allow a successful reconnection via ssh
    """
    # Count previous boots so we can verify a new one actually happened.
    last_boot_count_cmd = 'last reboot | grep "system boot" | wc -l'
    reboot_count = int(host.check_output(last_boot_count_cmd))

    hostname = host.backend.hostname
    username = host.user().name

    print('Rebooting...')
    with host.sudo():
        result = host.run('shutdown -r now')

    # Give the host time to actually go down before probing for SSH, so we do
    # not reconnect to the not-yet-rebooted system.
    time.sleep(10)

    ssh_lib.wait_for_host_ssh_up(hostname, max_timeout)

    # The old connection is stale after the reboot; build a fresh host object.
    new_host = host.get_host(f'paramiko://{username}@{hostname}',
                             ssh_config=host.backend.ssh_config)

    # If reboot_count is the same after reboot attempt, the system did not reboot
    if int(new_host.check_output(last_boot_count_cmd)) == reboot_count:
        raise Exception(f'Failed to reboot instance.\n'
                        f'\tstatus:\t{result.rc}\n'
                        f'\tstdout:\t{result.stdout}\n'
                        f'\tstderr:\t{result.stderr}')

    return new_host


def get_host_last_boot_time(host):
    """
    Get system boot time via "systemd-analyze"
    :param host: The testinfra host object, from pytest test case
    :return: (float) Boot time, in seconds
    """
    timeout_seconds = 60

    with host.sudo():
        # Poll until systemd reports a finished startup; right after a reboot
        # 'systemd-analyze' can fail or report an unfinished startup.
        start_time = time.time()
        while time.time() < start_time + timeout_seconds:
            systemd_analyze_result = host.run('systemd-analyze')
            if systemd_analyze_result.exit_status == 0 and \
                    'Startup finished' in systemd_analyze_result.stdout:
                break
            time.sleep(5)

        print(host.run('systemd-analyze blame').stdout, end='\n-------\n')
        print(host.run('systemd-analyze').stdout)

    # Example tail: "... = 1min 32.5s" -> captures "1min 32.5".
    boot_time_string = re.findall('Startup finished .* = (.*)s',
                                  systemd_analyze_result.stdout)[0]

    if 'min' in boot_time_string:
        # Assumes the "<minutes>min <seconds>" layout — TODO confirm against
        # the systemd-analyze output of all supported RHEL versions.
        boot_time_data = re.match(r'(\d+)min (\d+.\d+)', boot_time_string)

        if boot_time_data:
            boot_time_data = boot_time_data.groups()
            minutes = float(boot_time_data[0])
            seconds = float(boot_time_data[1])

            if seconds > 60:
                # This means it's milliseconds, not seconds
                seconds /= 1000

            boot_time = (minutes * 60) + seconds
        else:
            raise Exception(f'Could not obtain boot time from systemd-analyze output: {boot_time_string}')
    else:
        boot_time = float(boot_time_string)

    return float(boot_time)


def compare_local_and_remote_file(host,
                                  local_file_path,
                                  remote_file_path,
                                  ignore_commented_lines=True,
                                  ignore_space_and_blank=True):
    """
    Diff a local file against a file on the remote host.

    The local file is first copied to a unique path under /tmp on the host,
    then compared there with 'diff'.

    :param host: The testinfra host object, from pytest test case
    :param local_file_path: Path of the local file to compare.
    :param remote_file_path: Path of the file on the remote host.
    :param ignore_commented_lines: Pass -I "^#" -I "^ #" to diff.
    :param ignore_space_and_blank: Pass -wB to diff.
    :return: True when diff reports no differences, False otherwise.
    :raises FileNotFoundError: If the remote file does not exist.
    """
    # Unique temp name so parallel test runs do not clash.
    tmp_path = f'/tmp/test_file_{time.time()}'

    diff_command = ['diff']

    if ignore_space_and_blank:
        diff_command.append('-wB')

    if ignore_commented_lines:
        diff_command.append('-I "^#" -I "^ #"')

    diff_command.extend([remote_file_path, tmp_path])

    ssh_lib.copy_file_to_host(host, local_file_path, tmp_path)

    with host.sudo():
        if not host.file(remote_file_path).exists:
            raise FileNotFoundError(f'The remote file {remote_file_path} was not found')

        result = host.run(' '.join(diff_command))
        print(result.stdout)

        host.run(f'rm -rf {tmp_path}')

    return result.exit_status == 0


def filter_host_log_file_by_keywords(host,
                                    log_file,
                                    log_levels,
                                    keywords=None,
                                    exclude_mode=False):
    """
    Filters a log file by log levels, and then by a list of keywords.
    If exclude_mode is set to True, the keywords will be used for inverted match.
    :param host: The host to connect to, from testinfra's module
    :param log_file: Path to the file that needs to be filtered
    :param log_levels: The log levels that need to be taken into account for filtering
    :param keywords: List of keywords to use as secondary filter, after filtering by log level
    :param exclude_mode: Whether to use inverted match with the keywords or not
    :return: String with all the log lines found, matching the criteria
    """
    # Log levels are OR-ed into one case-insensitive extended regex.
    log_levels_regex = '|'.join(log_levels)

    if keywords is not None:
        keywords_regex = '|'.join(keywords)
        if exclude_mode:
            search_opt = '-vE'
            print('exclude_mode set to True. Keywords will be used for inverted match')
        else:
            search_opt = '-E'
        grep_filter_by_keyword = ' | grep {} "{}"'.format(search_opt, keywords_regex)
    else:
        grep_filter_by_keyword = ''

    print(f'Filtering {log_file} log file...')

    with host.sudo():
        result = host.run('grep -iE "{}" "{}"{}'.format(log_levels_regex,
                                                        log_file,
                                                        grep_filter_by_keyword))
        # grep exits 0 only when at least one line matched.
        if result.rc == 0:
            print(f'Logs found:\n{result.stdout}')
            return result.stdout
        else:
            print('No logs found.')
            print(result.stderr)

    return None


def print_host_command_output(host, command, capture_result=False, use_sudo=True):
    """
    Run a command on the host and pretty-print its output under a divider.
    :param host: The testinfra host object, from pytest test case
    :param command: The command line to execute remotely.
    :param capture_result: When True, also return the CommandResult object.
    :param use_sudo: Whether to run the command with root privileges.
    :return: CommandResult when capture_result is True, otherwise None.
    """
    console_lib.print_divider(command)

    if use_sudo:
        with host.sudo():
            result = host.run(command)
    else:
        result = host.run(command)

    if result.failed:
        print(f'Exit code: {result.exit_status}\n')
        print(f'Stdout:\n{result.stdout}\n')
        print(f'Stderr:\n{result.stderr}\n')
    else:
        print(result.stdout)

    if capture_result:
        return result
def get_failed_tests_analysis(data):
    """Group failed test cases by name and first-line error message.

    :param data: Parsed pytest-json-report dict with a 'tests' list.
    :return: {test_name: {error_message_first_line: occurrence_count}}
    """
    failure_counts = {}

    for test in data['tests']:
        if test['outcome'] != 'failed':
            continue

        # 'test_name[param]' -> 'test_name'; message trimmed to its first line.
        test_name = test['keywords'][0].split('[')[0]
        error_message = test['call']['crash']['message'].split('\n')[0]

        per_test = failure_counts.setdefault(test_name, {})
        per_test[error_message] = per_test.get(error_message, 0) + 1

    return failure_counts
def get_tests_summary(data):
    """Build pass/fail totals and a success ratio from the report's 'summary' section.

    BUGFIX: guard against a zero total so a summary with neither passed nor
    failed counts no longer raises ZeroDivisionError (ratio defaults to 0.0).

    :param data: Parsed pytest-json-report dict with a 'summary' section.
    :return: dict with passed_total, failed_total, failed_and_passed_total, success_ratio.
    """
    summary_data = data['summary']

    passed_total = summary_data['passed'] if 'passed' in summary_data else 0
    failed_total = summary_data['failed'] if 'failed' in summary_data else 0

    failed_and_passed_total = passed_total + failed_total

    if failed_and_passed_total > 0:
        success_ratio = round((passed_total * 100 / failed_and_passed_total), 2)
    else:
        success_ratio = 0.0

    return {
        'passed_total': passed_total,
        'failed_total': failed_total,
        'failed_and_passed_total': failed_and_passed_total,
        'success_ratio': success_ratio
    }


def get_analysis_as_cli(summary, analysis):
    """Render the summary and per-test failure analysis as plain CLI text lines."""
    summary_lines = [
        '-' * 100,
        f"Total passed:\t{summary['passed_total']}",
        f"Total failed:\t{summary['failed_total']}",
        f"Success ratio:\t{summary['success_ratio']}%",
        '-' * 100
    ]

    rows = summary_lines

    for test_case, error_data in analysis.items():
        for err_msg, count in error_data.items():
            rows.append(f'{test_case} - {count} time(s):')
            # Indent continuation lines of multi-line error messages.
            rows.append('\t' + __parse_error_message(err_msg).replace('\n', f'\n{spaced_indentation}'))

    rows.append('-' * 100)

    return rows


def __parse_error_message(error_message):
    """Extract a readable error description from a raw failure message.

    Generic 'AssertionError: ...' / 'Failed: ...' prefixes are stripped first.
    If the remainder is a testinfra CommandResult repr, it is unpacked into its
    command/exit_status/stdout/stderr fields. Very long messages are truncated
    in the middle.
    """
    max_length = 1000

    regex_error_generic = re.compile(r'(?:(?:AssertionError|Failed): (.*))')
    # BUGFIX: the named groups were missing ('(?P[...' is not valid regex
    # syntax, so compiling raised at runtime); restored as (?P<name>...) with
    # the names consumed from groupdict() below.
    regex_error_command = re.compile(
        r"Unexpected exit code \d+ for CommandResult\(command=b?(?P<command>['|\"]?.*['|\"]?), "
        r"exit_status=(?P<exit_status>\d+), stdout=b?(?P<stdout>['|\"]?.*['|\"]?), "
        r"stderr=b?(?P<stderr>['|\"]?.*['|\"]?)\)"
    )

    extracted_message = error_message

    result = re.findall(regex_error_generic, error_message)
    if result:
        extracted_message = result[0]

    result = re.match(regex_error_command, extracted_message)
    if result:
        error_details = result.groupdict()

        composed_error_message = []
        for key, value in error_details.items():
            # Collapse literal escape sequences coming from the repr into real
            # newlines and normalize quoting.
            formatted_value = value.replace(r'\n\n', '\n')
            formatted_value = formatted_value.replace(r"\n", "\n")
            formatted_value = formatted_value.replace("\n\"", "\"")
            formatted_value = formatted_value.replace("\n'", "\'")
            formatted_value = formatted_value.replace("\"", '\"\"')

            composed_error_message.append(f'{key}: {formatted_value.strip()}')

        extracted_message = '\n\n'.join(composed_error_message)

    diff = len(extracted_message) - max_length
    if diff > 0:
        chunk_size = max_length // 2
        # BUGFIX: keep the final character too ([-chunk_size:] instead of
        # [-chunk_size:-1], which silently dropped it).
        extracted_message = (extracted_message[0:chunk_size] + ' [] ' + extracted_message[-chunk_size:])

    return extracted_message


def get_analysis_as_jira_markup(summary, analysis):
    """Render the summary and per-test failure analysis using Jira markup syntax."""
    summary_lines = [
        '-' * 4,
        f"Total passed:\t{summary['passed_total']}",
        f"Total failed:\t{summary['failed_total']}",
        f"Success ratio:\t{summary['success_ratio']}%",
        '-' * 4
    ]

    rows = summary_lines

    for test_case, error_data in analysis.items():
        for err_msg, count in error_data.items():
            rows.append(
                f'h4. {test_case} - {count} failure(s): ' + '{code:java}' + __parse_error_message(err_msg) + '{code}'
            )

    return rows


def get_analysis_as_spreadsheet_table(summary, analysis, test_environment):
    """Render the analysis as tab-separated rows suitable for pasting into a spreadsheet.

    :param summary: dict produced by get_tests_summary().
    :param analysis: dict produced by get_failed_tests_analysis().
    :param test_environment: report 'environment' section; BUILD_URL, when
        present, becomes a HYPERLINK cell to the Jenkins report.
    """
    default_test_owner = 'Jenkins'
    default_status = 'Not Started'
    default_rerun_value = 'FALSE'
    default_delimiter = '\t'

    jenkins_url = 'Jenkins Report'
    if 'BUILD_URL' in test_environment:
        jenkins_url = '=HYPERLINK("{0}/Report", "{1}")'.format(test_environment['BUILD_URL'], 'Jenkins Report')

    summary_lines = [
        "\t".join(['Total passed:', f"{summary['passed_total']}", '', '', '', 'Pub Task']),
        "\t".join(['Total failed:', str(summary['failed_total']), '', '', 'Jenkins Report (rerun)', jenkins_url]),
        "\t".join(['Success ratio:', f"{summary['success_ratio']}%"])
    ]

    rows = summary_lines

    for test_case, error_data in analysis.items():
        for err_msg, count in error_data.items():
            formatted_err_msg = __parse_error_message(err_msg)

            if '\n' in formatted_err_msg:
                # BUGFIX: str.replace returns a new string; the original call
                # discarded its result, leaving inner quotes unescaped in the
                # quoted multi-line cell.
                formatted_err_msg = formatted_err_msg.replace("\"", '\"\"')
                formatted_err_msg = f"\"{formatted_err_msg}\""

            row_details = [
                test_case,
                default_test_owner,
                default_status,
                str(count),
                default_rerun_value,
                formatted_err_msg
            ]

            rows.append(default_delimiter.join(row_details))

    return rows
class CloudImageValidator:
    """Orchestrates a full CIV run: deploy cloud infrastructure with OpenTofu,
    prepare the instances, run the test suites, and clean everything up.
    """
    # Set by initialize_infrastructure(): OpenTofu controller/configurator pair.
    infra_controller = None
    infra_configurator = None

    # Exit code reserved for infrastructure (non-test) failures.
    infra_error_exit_code = 100

    def __init__(self, config):
        # config: dict produced by CIVConfig (resources_file, ssh paths, flags...).
        self.config = config

    def main(self):
        """Run the whole validation pipeline and return the process exit code.

        Returns infra_error_exit_code (100) on infrastructure errors, otherwise
        the exit code extracted from the test runner's wait status.
        """
        exit_code = 0
        instances = None

        try:
            console_lib.print_divider('Initializing infrastructure')
            self.infra_controller = self.initialize_infrastructure()

            console_lib.print_divider('Deploying infrastructure')
            instances = self.deploy_infrastructure()

            console_lib.print_divider('Preparing environment')
            self.prepare_environment(instances)

            console_lib.print_divider('Running tests')
            wait_status = self.run_tests_in_all_instances(instances)

            # os.wait()-style status: the exit code lives in the high byte.
            exit_code = wait_status >> 8

        except Exception:
            traceback.print_exc()
            exit_code = self.infra_error_exit_code

        finally:
            if self.config['stop_cleanup']:
                if self.config['environment'] == 'local':
                    # Let the user inspect the instances before tearing down.
                    self.print_ssh_commands_for_instances(instances)
                    input('Press ENTER to proceed with cleanup:')
                elif self.config["environment"] == 'automated':
                    console_lib.print_divider('Skipping cleanup')
                    # NOTE(review): returning from inside 'finally' suppresses any
                    # in-flight exception; deliberate here (skip cleanup entirely),
                    # but keep in mind when editing this flow.
                    return exit_code
                else:
                    print('ERROR: --environment parameter should be either "local" or "automated"')
                    exit_code = self.infra_error_exit_code

            console_lib.print_divider('Cleanup')
            self.cleanup()

            return exit_code

    def print_ssh_commands_for_instances(self, instances):
        """Print a ready-to-paste ssh command for every deployed instance."""
        if instances:
            for inst in instances.values():
                ssh_command = 'ssh -i {0} {1}@{2}'.format(self.config['ssh_identity_file'],
                                                          inst['username'],
                                                          inst['address'])
                instance_name = inst['name']
                print(f'{instance_name}:')
                print(f'\t{ssh_command}')

    def initialize_infrastructure(self):
        """Generate the SSH key pair and build the OpenTofu configuration.

        :return: an OpenTofuController bound to the generated configuration.
        """
        ssh_lib.generate_ssh_key_pair(self.config['ssh_identity_file'])

        self.infra_configurator = OpenTofuConfigurator(ssh_key_path=self.config['ssh_pub_key_file'],
                                                       resources_path=self.config['resources_file'],
                                                       config=self.config)
        self.infra_configurator.configure_from_resources_json()

        if self.config['debug']:
            self.infra_configurator.print_configuration()

        return OpenTofuController(self.infra_configurator, self.config['debug'])

    def deploy_infrastructure(self):
        """Create the cloud resources and return the deployed instances dict.

        Also persists the instances to a JSON file and writes the ssh config
        used by the test runner to reach them.
        """
        self.infra_controller.create_infra()
        instances = self.infra_controller.get_instances()

        if self.config['debug']:
            pprint(instances)

        self._write_instances_to_json(instances)

        ssh_lib.generate_instances_ssh_config(instances=instances,
                                              ssh_config_file=self.config['ssh_config_file'],
                                              ssh_key_path=self.config['ssh_identity_file'])

        return instances

    def _write_instances_to_json(self, instances):
        """Persist the instances dict as pretty-printed JSON for later stages."""
        with open(self.config['instances_json'], 'w') as file:
            json.dump(instances, file, indent=4)

    def prepare_environment(self, instances):
        """Copy the team's public SSH keys to every running instance."""
        print('Copying team SSH public keys in the running instance(s)...')
        ssh_lib.add_ssh_keys_to_instances(instances, self.config['ssh_config_file'])

    def run_tests_in_all_instances(self, instances):
        """Run the configured test suites on all instances.

        :return: the wait status returned by SuiteRunner.run_tests (exit code
                 in the high byte, see main()).
        """
        runner = SuiteRunner(cloud_provider=self.infra_configurator.cloud_name,
                             instances=instances,
                             ssh_config=self.config['ssh_config_file'],
                             parallel=self.config['parallel'],
                             debug=self.config['debug'])

        return runner.run_tests(self.config['test_suites'],
                                self.config['output_file'],
                                self.config['test_filter'],
                                self.config['include_markers'])

    def cleanup(self):
        """Destroy the cloud resources and, unless debugging, the local artifacts."""
        self.infra_controller.destroy_infra()

        # In debug mode the key/config files are kept for manual inspection.
        if not self.config['debug']:
            os.remove(self.config['ssh_identity_file'])
            os.remove(self.config['ssh_pub_key_file'])
            os.remove(self.config['ssh_config_file'])
            os.remove(self.config['instances_json'])
class Reporter:
    """Converts a JUnit XML results file into an HTML report."""

    def __init__(self, junit_report_path):
        # Path to the JUnit XML file produced by the test run.
        self.report_path = junit_report_path

    def generate_html_report(self, destination_path):
        """Render the JUnit report as an HTML matrix via the junit2html CLI.

        NOTE(review): both paths are interpolated into a shell command line
        unquoted, so paths containing spaces or shell metacharacters would
        break — confirm callers only ever pass safe paths.
        """
        command = f'junit2html {self.report_path} --report-matrix {destination_path}'
        os.system(command)
        print(f'HTML report generated: {destination_path}')
# This section serves to define target repos for rhel-upgrade test.
# Need to check that it's actually a number passed, as this script is
# being sourced from a script that already has arguments passed.
if [[ $# -gt 0 ]] && [[ "$1" =~ ^[0-9]+(\.[0-9]+)?$ ]]; then
    VERSION_ID=$1
fi

if [[ $ID == rhel && ${VERSION_ID%.*} == 9 ]]; then
    COMPOSE_ID=$(curl -L http://download.devel.redhat.com/rhel-9/nightly/RHEL-9/latest-RHEL-"${VERSION_ID}"/COMPOSE_ID)

    # default to a nightly tree but respect values passed from ENV so we can test rel-eng composes as well
    COMPOSE_URL="${COMPOSE_URL:-http://download.devel.redhat.com/rhel-9/nightly/RHEL-9/$COMPOSE_ID}"

elif [[ $ID == rhel && ${VERSION_ID%.*} == 10 ]]; then
    COMPOSE_ID=$(curl -L http://download.devel.redhat.com/rhel-10/nightly/RHEL-10/latest-RHEL-"${VERSION_ID}"/COMPOSE_ID)

    # default to a nightly tree but respect values passed from ENV so we can test rel-eng composes as well
    COMPOSE_URL="${COMPOSE_URL:-http://download.devel.redhat.com/rhel-10/nightly/RHEL-10/$COMPOSE_ID}"
fi

# in case COMPOSE_URL was defined from the outside refresh COMPOSE_ID file,
# used for slack messages in case of success/failure
curl -L "$COMPOSE_URL/COMPOSE_ID" > COMPOSE_ID

echo "INFO: Testing COMPOSE_ID=$COMPOSE_ID at COMPOSE_URL=$COMPOSE_URL"

# Make sure the compose URL really exists.
# BUGFIX: the previous invocation used `-o -I`, which made curl treat "-I" as
# the *argument* of -o: it issued a full GET and wrote the response body to a
# file literally named "-I" instead of doing a HEAD request. Discard output to
# /dev/null and send a real HEAD request; the duplicate -s/--silent is dropped.
RETURN_CODE=$(curl --silent --output /dev/null --head --location --write-out "%{http_code}" "${COMPOSE_URL}")
if [ "$RETURN_CODE" != 200 ]
then
    echo "Compose URL $COMPOSE_URL returned error code $RETURN_CODE, exiting."
    exit 1
fi
function get_last_passed_commit {
    # Resolve the newest osbuild-composer commit whose CI passed: from the
    # GitLab nightly scheduled pipeline when INTERNAL_NIGHTLY == "internal",
    # otherwise from the GitHub commit statuses. Prints the SHA on stdout.
    # Using 'internal' instead of 'true' so it's easier to see the pipelines in the Gitlab page
    if [ "${INTERNAL_NIGHTLY:=false}" == "internal" ]; then
        project_id="34771166"
        # Build the curl invocation as an array so the token travels as one
        # safely-quoted argument (the old string relied on word splitting,
        # shellcheck SC2086).
        base_curl=(curl --header "PRIVATE-TOKEN:${GITLAB_API_TOKEN}")

        # To get the schedule id use the ../pipeline_schedule endpoint
        if [[ ${VERSION_ID%.*} == "9" ]]; then
            # RHEL 9 scheduled pipeline id
            schedule_id="233736"
        elif [[ ${VERSION_ID%.*} == "10" ]]; then
            # RHEL 10 scheduled pipeline id (FYI - it was used for RHEL 8 before)
            schedule_id="233735"
        else
            echo "No scheduled pipeline defined for RHEL $VERSION_ID"
            exit 1
        fi

        # Last executed pipeline ID
        pipeline_id=$("${base_curl[@]}" "https://gitlab.com/api/v4/projects/${project_id}/pipeline_schedules/${schedule_id}" | jq '.last_pipeline.id')

        # Refuse to run against a stale scheduled pipeline.
        number_of_days=7
        warning_date=$(date -d "- $number_of_days days" +%s)
        created_at=$("${base_curl[@]}" "https://gitlab.com/api/v4/projects/${project_id}/pipelines/${pipeline_id}" | jq -r '.started_at')
        if [[ $(date -d "${created_at}" +%s) -lt "${warning_date}" ]]; then
            echo "We are using an old scheduled pipeline id (more than $number_of_days days ago). Please update it"
            exit 1
        fi

        # Fail fast if any rpmbuild job of the nightly pipeline failed.
        statuses=$("${base_curl[@]}" "https://gitlab.com/api/v4/projects/${project_id}/pipelines/${pipeline_id}/jobs?per_page=100" | jq -cr '.[] | select(.stage=="rpmbuild") | .status')
        for status in ${statuses}; do
            if [ "$status" == "failed" ]; then
                echo "Last nightly pipeline ('rpmbuild' stage) failed in osbuild-composer CI. We will not run nightly-internal jobs in CIV."
                exit 1
            fi
        done

        commit=$("${base_curl[@]}" "https://gitlab.com/api/v4/projects/${project_id}/pipelines/${pipeline_id}" | jq -r '.sha')
        echo "$commit"

    else
        # Quote credentials and URLs: unquoted ${API_USER}:${API_PAT} would
        # word-split, and the unquoted `?per_page` URL is glob-expandable.
        commit_list=$(curl -u "${API_USER}:${API_PAT}" -s "https://api.github.com/repos/osbuild/osbuild-composer/commits?per_page=100" | jq -cr '.[].sha')

        # Walk the newest commits and stop at the first one whose
        # "Schutzbot on GitLab" status is green.
        for commit in ${commit_list}; do
            gitlab_status=$(curl -u "${API_USER}:${API_PAT}" -s "https://api.github.com/repos/osbuild/osbuild-composer/commits/${commit}/status" \
                | jq -cr '.statuses[] | select(.context == "Schutzbot on GitLab") | .state')
            if [[ ${gitlab_status} == "success" ]]; then
                break
            fi
        done
        echo "$commit"
    fi
}
# Get OS details.
source ci/set-env-variables.sh

if [[ $ID == "rhel" && ${VERSION_ID%.*} == "9" ]]; then
    # There's a bug in RHEL 9 that causes /tmp to be mounted on tmpfs.
    # Explicitly stop and mask the mount unit to prevent this.
    # Otherwise, the tests will randomly fail because we use /tmp quite a lot.
    # See https://bugzilla.redhat.com/show_bug.cgi?id=1959826
    greenprint "Disabling /tmp as tmpfs on RHEL 9"
    sudo systemctl stop tmp.mount && sudo systemctl mask tmp.mount
fi

if [[ $ID == "centos" && $VERSION_ID == "8" ]]; then
    # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=2065292
    # Remove when podman-4.0.2-2.el8 is in Centos 8 repositories
    greenprint "Updating libseccomp on Centos 8"
    sudo dnf upgrade -y libseccomp
fi

# Distro version that this script is running on.
DISTRO_VERSION=${ID}-${VERSION_ID}

if [[ "$ID" == rhel ]] && sudo subscription-manager status; then
    # If this script runs on subscribed RHEL, install content built using CDN
    # repositories.
    DISTRO_VERSION=rhel-${VERSION_ID%.*}-cdn

    # workaround for https://github.com/osbuild/osbuild/issues/717
    sudo subscription-manager config --rhsm.manage_repos=1
fi

greenprint "Enabling fastestmirror to speed up dnf 🏎️"
echo -e "fastestmirror=1" | sudo tee -a /etc/dnf/dnf.conf

# TODO: include this in the jenkins runner (and split test/target machines out)
sudo dnf -y install jq

# Get latest commit from osbuild-composer main branch
GIT_COMMIT=$(get_last_passed_commit)

setup_repo osbuild-composer "${GIT_COMMIT}" 5

# Read the pinned osbuild commit straight from Schutzfile; jq takes the file
# as an argument, no `cat | jq` pipeline needed (matches the jq call below).
OSBUILD_GIT_COMMIT=$(jq -r '.["'"${ID}-${VERSION_ID}"'"].dependencies.osbuild.commit' Schutzfile)
if [[ "${OSBUILD_GIT_COMMIT}" != "null" ]]; then
    setup_repo osbuild "${OSBUILD_GIT_COMMIT}" 10
fi

if [[ "$PROJECT" != "osbuild-composer" ]]; then
    PROJECT_COMMIT=$(jq -r ".[\"${ID}-${VERSION_ID}\"].dependants[\"${PROJECT}\"].commit" Schutzfile)
    setup_repo "${PROJECT}" "${PROJECT_COMMIT}" 10

    # Get a list of packages needed to be preinstalled before "${PROJECT}-tests".
    # Useful mainly for EPEL.
    PRE_INSTALL_PACKAGES=$(jq -r ".[\"${ID}-${VERSION_ID}\"].dependants[\"${PROJECT}\"].pre_install_packages[]?" Schutzfile)

    if [ "${PRE_INSTALL_PACKAGES}" ]; then
        # shellcheck disable=SC2086 # We need to pass multiple arguments here.
        sudo dnf -y install ${PRE_INSTALL_PACKAGES}
    fi
fi

if [ -f "rhel${VERSION_ID%.*}internal.repo" ]; then
    greenprint "Preparing repos for internal build testing"
    sudo mv rhel"${VERSION_ID%.*}"internal.repo /etc/yum.repos.d/
fi

greenprint "Installing test packages for ${PROJECT}"

# NOTE: WORKAROUND FOR DEPENDENCY BUG
retry sudo dnf -y upgrade selinux-policy

# Note: installing only -tests to catch missing dependencies
retry sudo dnf -y install "${PROJECT}-tests"

# Save osbuild-composer NVR to a file to be used as CI artifact
rpm -q osbuild-composer > COMPOSER_NVR

if [ "${INTERNAL_NIGHTLY:=false}" == "internal" ]; then
    # check if we've installed the osbuild-composer RPM from the nightly tree
    # under test or happen to install a newer version from one of the S3 repositories
    rpm -qi osbuild-composer
    if ! rpm -qi osbuild-composer | grep "Build Host" | grep "redhat.com"; then
        echo "ERROR: Installed osbuild-composer RPM is not the official one"
        exit 2
    else
        echo "INFO: Installed osbuild-composer RPM seems to be official"
    fi

    # cross-check the installed RPM against the one under COMPOSE_URL
    source schutzbot/define-compose-url.sh

    INSTALLED=$(rpm -q --qf "%{name}-%{version}-%{release}.%{arch}.rpm" osbuild-composer)
    RPM_URL="${COMPOSE_URL}/compose/AppStream/${ARCH}/os/Packages/${INSTALLED}"
    # BUGFIX: `-o -I` made curl treat "-I" as the filename for -o, issuing a
    # full GET that downloaded the entire RPM into a file named "-I". Send a
    # real HEAD request and discard any output instead.
    RETURN_CODE=$(curl --silent --output /dev/null --head --location --write-out "%{http_code}" "${RPM_URL}")
    if [ "$RETURN_CODE" != 200 ]; then
        echo "ERROR: Installed ${INSTALLED} not found at ${RPM_URL}. Response was ${RETURN_CODE}"
        exit 3
    else
        echo "INFO: Installed ${INSTALLED} found at ${RPM_URL}, which matches SUT!"
    fi
fi

# BUGFIX: this script runs under `set -u`; testing an unset CI with -n would
# abort the whole script. Default to empty when CI is not exported.
if [ -n "${CI:-}" ]; then
    # copy repo files b/c GitLab can't upload artifacts
    # which are outside the build directory
    cp /etc/yum.repos.d/*.repo "$(pwd)"
fi
--output=/tmp/diff_{file_changed_underscore} HEAD upstream/main {file_changed}') 50 | 51 | # Read file into list 52 | return lines_into_list(f'/tmp/diff_{file_changed_underscore}') 53 | 54 | 55 | def find_method_name(direction, line_num, diff): 56 | if direction == 'above': 57 | step = -1 58 | stop = 0 59 | elif direction == 'below': 60 | step = 1 61 | stop = len(diff) 62 | else: 63 | print(f'direction has to be "above" or "below", not {direction}') 64 | exit() 65 | 66 | for i in range(line_num, stop, step): 67 | raw_line = diff[i][1:].strip() 68 | if raw_line[0:3] == 'def': 69 | method = raw_line[4:].split('(')[0] 70 | return method 71 | elif raw_line[0:5] == 'class': 72 | print(f'A class was found before a function, the filter cannot be applied. Class: {raw_line}') 73 | return None 74 | 75 | 76 | def get_method_from_changed_line(line_num, diff): 77 | raw_line = diff[line_num][1:].strip() 78 | 79 | if raw_line[0:3] == 'def': 80 | method = find_method_name('above', line_num + 1, diff) 81 | elif raw_line[0:1] == '@': 82 | method = find_method_name('below', line_num, diff) 83 | else: 84 | method = find_method_name('above', line_num, diff) 85 | 86 | return method 87 | 88 | 89 | def get_modified_methods(): 90 | modified_methods = set() 91 | test_dirs = ['test_suite/cloud/', 'test_suite/generic/'] 92 | 93 | files_changed = get_files_changed() 94 | print('--- Files changed:') 95 | print(*files_changed, sep='\n') 96 | 97 | for file_changed in files_changed: 98 | # Check if file is a test suite file 99 | if test_dirs[0] not in file_changed and test_dirs[1] not in file_changed: 100 | print(f'{file_changed} is not a test suite file, filter cannot be applied') 101 | return None 102 | 103 | diff = changed_file_to_diff_list(file_changed) 104 | for line_num, line in enumerate(diff): 105 | if line[0:1] in ['+', '-']: 106 | method = get_method_from_changed_line(line_num, diff) 107 | 108 | if method is None: 109 | return None 110 | elif method[0:4] != 'test': 111 | print(f'The 
def write_vars_file(vars, vars_file_path):
    """Write every non-None entry of *vars* as an `export NAME="value"` line."""
    export_lines = [f'export {name}="{value}"\n'
                    for name, value in vars.items()
                    if value is not None]
    with open(vars_file_path, 'w+') as vars_file:
        vars_file.writelines(export_lines)


def get_modified_methods_str():
    """Join the modified test-method names into a pytest filter expression.

    Returns None when the changed files cannot be reduced to test methods.
    """
    methods = get_modified_methods()
    if methods is None:
        return None

    method_list = list(methods)
    print('--- Modified methods:')
    print(*method_list, sep='\n')
    return ' or '.join(method_list)


def get_skip_vars():
    """Decide which cloud providers can be skipped for this change set.

    Generic test changes require every cloud; otherwise only the clouds
    whose dedicated test file changed are enabled.
    """
    skip_vars = {'skip_aws': 'true', 'skip_azure': 'true', 'skip_gcp': 'true'}
    cloud_test_files = {'test_suite/cloud/test_aws.py': 'skip_aws',
                        'test_suite/cloud/test_azure.py': 'skip_azure',
                        'test_suite/cloud/test_gcp.py': 'skip_gcp'}

    for file_changed in get_files_changed():
        if 'test_suite/generic/' in file_changed:
            # Generic suites run everywhere; nothing can be skipped.
            return {'skip_aws': 'false', 'skip_azure': 'false', 'skip_gcp': 'false'}
        if file_changed in cloud_test_files:
            skip_vars[cloud_test_files[file_changed]] = 'false'

    return skip_vars


def write_config_file(config_path, civ_config):
    """Dump the CIV configuration dict to *config_path* as YAML."""
    with open(config_path, 'w+') as config_file:
        yaml.dump(civ_config, config_file)
'In_CI_Cloud_Test:' + os.environ['CI_JOB_NAME'], 173 | 'Project': 'CIV', 174 | 'Branch': os.environ['CI_COMMIT_REF_SLUG'], 175 | 'Pipeline_id': os.environ['CI_PIPELINE_ID'], 176 | 'Pipeline_source': os.environ['CI_PIPELINE_SOURCE']}, 177 | 'debug': True, 178 | 'include_markers': 'not pub', 179 | 'test_filter': modified_methods_str} 180 | 181 | # This env var comes from GitLab CI pipeline 182 | if 'CUSTOM_PACKAGES' in os.environ: 183 | civ_config['tags']['custom_packages'] = os.environ['CUSTOM_PACKAGES'] 184 | 185 | # This env var comes from GitLab CI pipeline 186 | if 'TEST_SUITES' in os.environ: 187 | civ_config['test_suites'] = [] 188 | civ_config['test_suites'].extend(os.environ['TEST_SUITES'].split(' ')) 189 | 190 | # This env var comes from GitLab CI pipeline 191 | if 'AWS_EFS' in os.environ and os.environ['AWS_EFS'].lower() == 'true': 192 | civ_config['tags']['aws-efs'] = True 193 | 194 | # If modified_methods_str is different than None, we might need to skip some clouds 195 | # If it's None, just run CIV in all clouds, no skipping 196 | if modified_methods_str: 197 | vars['SKIP_AWS'] = skip_vars['skip_aws'] 198 | vars['SKIP_AZURE'] = skip_vars['skip_azure'] 199 | vars['SKIP_GCP'] = skip_vars['skip_gcp'] 200 | else: 201 | vars['SKIP_AWS'] = 'false' 202 | vars['SKIP_AZURE'] = 'false' 203 | vars['SKIP_GCP'] = 'false' 204 | 205 | print('--- SKIP_:') 206 | [print(key, ': ', value) for key, value in vars.items()] 207 | 208 | config_path = '/tmp/civ_config.yaml' 209 | vars['CIV_CONFIG_FILE'] = config_path 210 | 211 | write_config_file(config_path, civ_config) 212 | print('--- civ_config:') 213 | pprint(civ_config) 214 | 215 | write_vars_file(vars, vars_file_path) 216 | -------------------------------------------------------------------------------- /schutzbot/prepare-rhel-internal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # Colorful output. 
# Colorful output.
function greenprint {
    echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m"
}

# Architectures for which per-arch BaseOS/AppStream repo sections are written.
ALL_ARCHES="aarch64 ppc64le s390x x86_64"

# Resolve COMPOSE_URL/COMPOSE_ID; the path differs depending on whether we
# are invoked from the repo root or from inside schutzbot/.
if [ -e ../schutzbot/define-compose-url.sh ]
then
    source ../schutzbot/define-compose-url.sh
else
    source ./schutzbot/define-compose-url.sh
fi

# Create a repository file for installing the osbuild-composer RPMs
greenprint "📜 Generating dnf repository file"
rm -f rhel"${VERSION_ID%.*}"internal.repo
for ARCH in $ALL_ARCHES; do
    tee -a rhel"${VERSION_ID%.*}"internal.repo << EOF

[rhel${VERSION_ID}-internal-baseos-${ARCH}]
name=RHEL Internal BaseOS
baseurl=${COMPOSE_URL}/compose/BaseOS/${ARCH}/os/
enabled=1
gpgcheck=0
# Default dnf repo priority is 99. Lower number means higher priority.
priority=1

[rhel${VERSION_ID}-internal-appstream-${ARCH}]
name=RHEL Internal AppStream
baseurl=${COMPOSE_URL}/compose/AppStream/${ARCH}/os/
enabled=1
gpgcheck=0
# osbuild-composer repo priority is 5
priority=1
EOF
done

# Create tests .repo file if REPO_URL is provided from ENV
# Otherwise osbuild-composer-tests.rpm will be downloaded from
# existing repositories
if [ -n "${REPO_URL+x}" ]; then
    JOB_NAME="${JOB_NAME:-${CI_JOB_ID}}"

    greenprint "📜 Amend dnf repository file"
    tee -a rhel"${VERSION_ID%.*}"internal.repo << EOF

[osbuild-composer-tests-multi-arch]
name=Tests ${JOB_NAME}
baseurl=${REPO_URL}
enabled=1
gpgcheck=0
# osbuild-composer repo priority is 5
priority=1
EOF

fi
-------------------------------------------------------------------------------- /schutzbot/team_ssh_keys/fkolwa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVTAuDDziPc1lVbuXrt8mvAUeTfRW7FwesywSbA5d1Jwpeq2t8BxQ0kR2dhImJ4rZNgGeiXksAHPTHKh+vfscEHAYelnWzO111Q0UaV4O6IK4Mubs2OYLWik6ryoyZa4vrEz3uOLIAxultLBSVXQMgjzgLHjRc3aYRUFw0lkL679qObocIUj4AbVi7lAMZTjnlTEthi/3+cdAazGXAXTwdoKtKK9b92SKGRAeLHceohuScMhLs464KWF7zqGmYvMvFp33EM6XJkaC4MJww1xMH6PnkLAmLhfUjfWnA5STjPaaAvy5XN/OKDBGaoYceiAJIlA5QpvKAs8HmDEn3LtMd fkolwa@x1 -------------------------------------------------------------------------------- /schutzbot/team_ssh_keys/igulina.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKM/k0O9AR9rdXKMQXaNM8jONupIDise+xIM3jz+uduy6YAnYfmKkYZL+S4jYUR5zPnRnIsf/O1p7gZK4uxh5OjvrQHWzWPvXGQu5VEOSMaEP85StiM4xNDJcjpF6feMkSRWF1pmGJs7sIbxXeLP0VyaiNXWOWR9zyr9tVQKSD6ogduh59+q1ydPuwciIxGTciFDOTCsY+eracOk+DI9MnGhpBTHXYh/wK8OkC9kLUQRYFd+mKVQNC2Xm8/YAoI7NbOTPMzVvHqhOgIvuK5l3CTRCQaWabui3AvUSrAXruv+CqYrW3+16oDa2x07iVTB2lJEe7aeuc3NetnF0pQiTf igulina@localhost -------------------------------------------------------------------------------- /schutzbot/team_ssh_keys/knivnia.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINN54QUiPkrBzGoaryeiDefDZbaLFkc3nUWBYyw3OjGc knivnja@gmail.com -------------------------------------------------------------------------------- /schutzbot/team_ssh_keys/nmunoz.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDFzaUMOcIRGmDE2+RwDb9Se8Omp35eCSaj4JvqNvg7DT4qxHgMIysWpmCu7xKHCQ3UsSmv89Do1F+5wiSmDjjE0UIuWl3oH2njmctCntDnV9vZGmWQ4lpNzQcpDcizsmo7dyD9SNNDTShm2EtWn76OvWeGaKCYfBonnLPFKbPC7BS4hIuiCvfaWh1E02+lkLcLbsWWFpSKSKyUmHoWuqmZZRFMNCSJ5XvYR7LNmvgsBKe3FtlQiHlN+8ulvGm5UIZFZEkWntyCqX3NexJOseA7aYG8fcWEK/+z6gG682nVoIwrdzARCy4K3u7+ZlFc+RsQVwXioO06bpBZqL8fQ5pj+pBWSZ55gryvhbp5g6NXwWFHg9avjSSPohZG6m0KzcPP8p7cxwd7G4PucmUsz2QfjZIvuEe0ry6cxVMDY3AwaI+y+b8CbRGob3Gu9LjKkNj478sm6K6Dt6d5naRPGIWinF5xCMr01818ufbyr7DYlH9jtGdbv3LIOpMEIFC9SDgfPLjFAja0KmXvtka8zfCC9pUMx/BmnC+UizLe0KphX2U+uNFz6SUNFZ5Czy7z/vBJh6NOCNr7h61F/JHpNt2LwXA9g9XSQTDe4gfPfZ+7NzUtbdgJRXhBBwXa1wl8ewN0yFJ1hzZnk0r7pOcJW5MyytqnBxZ4Y21dgBsKWuaK2Q== nmunoz - fedora -------------------------------------------------------------------------------- /schutzbot/team_ssh_keys/sshmulev.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDRWBaTx3mBYxRaO+fPZ2+LJlyusMB9wnx0k+ss/nE5+xC3tkZvhTGwop51RhMu99Yk5CWfZIoiC0W/zfGZNiwP4sR69sWkn+TRLE1XmlWFXkRG79GMorw8QmiUDh3UXaWdps3qiAO3VY502Klfv8MrHdI9CkWG0m4YnMt+2w4seZq3qeqt/0RcGTbJ5MOh2rAZXAIBYgB2wRzMH8XbwNhDgqwywrsGY4q96IcpbEQQW/uIqfYGFCqzw2/1/IurLNgKOAmAXJ7zKAWKqApfQGpzIGzfgcWkeqf0XkQzgVVttbZh7vyM+ZTBYep7alulQop9lmWRgKNUZiEpShjWPQE9IYUAcKbzHsLQSyy4iuTK+zLWZK1GwIY+7CwQ79VMT46rTXzYKrN8XG9UOPzlV9H9NTfpjFv1Vp8ZT6Mjlnsqy91Wq43BZ6Z3P1KqH2SwZXnxrY9NUoQYg3MKhn8ZhB0eAn6iYcHuSH7GftBp1u7BP7fsy2XTbLffenjS2OU9t6JjXovk823munhNtWN9iOzwIGpl/+yJMx2PsbAMUBSZK4zvjTdqtQW+pXMHWP6bTASEzW4Sd3+UtWw744IgYHDUkQeNVX5I6lmP9VjfOm0x++KVCfF2cbmthoutFxa1yHIXG+Fj5YNzrEcJCmd2sBHmPivVanSB7V4RE038ceSLrQ== sshmulev@redhat.com -------------------------------------------------------------------------------- /schutzbot/terraform: -------------------------------------------------------------------------------- 1 | 932f22fff75014dc079b3291157028923d2926d0 -------------------------------------------------------------------------------- 
"""Decide whether the CI base image must be rebuilt.

Prints "true" when the PR changes any file baked into the base image
(requirements.txt or base.Dockerfile), "false" otherwise. The output is
consumed by the CI pipeline.
"""
import os
import subprocess


def get_files_changed():
    """Return the list of file paths that differ between HEAD and upstream/main."""
    # Best-effort: adding the remote fails harmlessly when it already exists.
    os.system('git remote add upstream https://github.com/osbuild/cloud-image-val.git')
    os.system('git fetch upstream')
    files_changed_cmd = ['git', 'diff', '--name-only', 'HEAD', 'upstream/main']
    files_changed_raw = subprocess.run(files_changed_cmd, stdout=subprocess.PIPE)

    # BUGFIX: the old code tested `stderr is not None`, but stderr is never
    # captured here, so it is always None; use the return code to detect a
    # failed git invocation instead.
    if files_changed_raw.returncode != 0 or files_changed_raw.stdout == b'':
        print('ERROR: git diff command failed or there are no changes in the PR')
        exit()

    # BUGFIX: decode the bytes instead of slicing repr(bytes) — the old
    # str(...)[2:-3] trick broke on escaped characters in file names.
    return files_changed_raw.stdout.decode().rstrip('\n').split('\n')


if __name__ == "__main__":
    files_that_update_base = ["requirements.txt", "base.Dockerfile"]
    files_changed = get_files_changed()

    for file_changed in files_changed:
        if file_changed in files_that_update_base:
            print("true")
            exit()

    print("false")
    exit()
20 | elif [[ $1 == "update" ]]; then 21 | if [[ $CI_JOB_STATUS == "canceled" ]]; then 22 | GITHUB_NEW_STATE="failure" 23 | GITHUB_NEW_DESC="Someone told me to cancel this test run." 24 | elif [[ $CI_JOB_STATUS == "failed" ]]; then 25 | GITHUB_NEW_STATE="failure" 26 | GITHUB_NEW_DESC="I'm sorry, something is odd about this commit." 27 | else 28 | exit 0 29 | fi 30 | else 31 | echo "unknown command" 32 | exit 1 33 | fi 34 | 35 | curl \ 36 | -u "${SCHUTZBOT_LOGIN}" \ 37 | -X POST \ 38 | -H "Accept: application/vnd.github.v3+json" \ 39 | "https://api.github.com/repos/osbuild/cloud-image-val/statuses/${CI_COMMIT_SHA}" \ 40 | -d '{"state":"'"${GITHUB_NEW_STATE}"'", "description": "'"${GITHUB_NEW_DESC}"'", "context": "Schutzbot on GitLab", "target_url": "'"${CI_PIPELINE_URL}"'"}' 41 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/osbuild/cloud-image-val/7682fc950842c962d066a637be9ac6ee4b83d9f2/test/__init__.py -------------------------------------------------------------------------------- /test/test_cloud_image_validator.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import os # noqa: F401 3 | 4 | from main.cloud_image_validator import CloudImageValidator 5 | from cloud.opentofu.opentofu_configurator import OpenTofuConfigurator 6 | from cloud.opentofu.opentofu_controller import OpenTofuController 7 | from test_suite.suite_runner import SuiteRunner 8 | 9 | 10 | class TestCloudImageValidator: 11 | test_config = {'resources_file': '/fake/test/resources_file.json', 12 | 'output_file': '/fake/test/output_file.xml', 13 | 'test_filter': 'test_test_name', 14 | 'include_markers': 'pub', 15 | 'parallel': True, 16 | 'debug': True, 17 | 'stop_cleanup': False, 18 | 'config_file': '/tmp/test_config_file.yml', 19 | 'test_suites': ['test_path_1', 'test_path_2'], 
    def test_main(self, mocker, validator):
        """main() must run every phase (init, deploy, prepare, test, cleanup)
        in order and convert the test-run wait status into an exit code."""
        # Arrange
        test_controller = 'test controller'
        # 32512 >> 8 == 127 — presumably an os.wait()-style status that
        # main() shifts into a shell exit code; confirm against main().
        wait_status_test = 32512
        exit_code_test = 127

        mock_initialize_infrastructure = mocker.MagicMock(return_value=test_controller)
        validator.initialize_infrastructure = mock_initialize_infrastructure

        mock_print_divider = mocker.patch('lib.console_lib.print_divider')

        mock_deploy_infrastructure = mocker.MagicMock(return_value=self.test_instances)
        validator.deploy_infrastructure = mock_deploy_infrastructure

        mock_prepare_environment = mocker.MagicMock()
        validator.prepare_environment = mock_prepare_environment

        mock_run_tests_in_all_instances = mocker.MagicMock(return_value=wait_status_test)
        validator.run_tests_in_all_instances = mock_run_tests_in_all_instances

        mock_cleanup = mocker.MagicMock()
        validator.cleanup = mock_cleanup

        # Act
        result = validator.main()

        # Assert
        assert result == exit_code_test

        # The divider calls double as a check that every phase actually ran,
        # and in the expected order.
        assert mock_print_divider.call_args_list == [
            mocker.call('Initializing infrastructure'),
            mocker.call('Deploying infrastructure'),
            mocker.call('Preparing environment'),
            mocker.call('Running tests'),
            mocker.call('Cleanup')
        ]

        mock_initialize_infrastructure.assert_called_once()
        mock_deploy_infrastructure.assert_called_once()
        mock_run_tests_in_all_instances.assert_called_once_with(self.test_instances)
        mock_prepare_environment.assert_called_once_with(self.test_instances)
        mock_cleanup.assert_called_once()

    def test_initialize_infrastructure(self, mocker, validator):
        """initialize_infrastructure() must generate the SSH key pair and
        drive the OpenTofu configurator through its configuration steps."""
        # Arrange
        mocker.patch('lib.ssh_lib.generate_ssh_key_pair')
        mock_get_cloud_provider_from_resources = mocker.patch.object(OpenTofuConfigurator,
                                                                     'get_cloud_provider_from_resources')
        mock_configure_from_resources_json = mocker.patch.object(OpenTofuConfigurator,
                                                                 'configure_from_resources_json')
        mock_print_configuration = mocker.patch.object(OpenTofuConfigurator,
                                                       'print_configuration')
        mock_initialize_resources_dict = mocker.patch.object(OpenTofuConfigurator,
                                                             '_initialize_resources_dict')

        # Act
        validator.initialize_infrastructure()

        # Assert
        mock_get_cloud_provider_from_resources.assert_called_once()
        mock_configure_from_resources_json.assert_called_once()
        mock_print_configuration.assert_called_once()
        mock_initialize_resources_dict.assert_called_once()
mock_write_instances_to_json.assert_called_once_with( 123 | self.test_instances) 124 | mock_generate_instances_ssh_config.assert_called_once_with(instances=self.test_instances, 125 | ssh_config_file=validator.config['ssh_config_file'], 126 | ssh_key_path=validator.config['ssh_identity_file']) 127 | 128 | def test_prepare_environment(self, mocker, validator): 129 | mock_add_ssh_keys_to_instances = mocker.patch('lib.ssh_lib.add_ssh_keys_to_instances') 130 | 131 | validator.prepare_environment(self.test_instances) 132 | 133 | mock_add_ssh_keys_to_instances.assert_called_once_with(self.test_instances, 134 | validator.config['ssh_config_file']) 135 | 136 | def test_run_tests_in_all_instances(self, mocker, validator): 137 | mocker.patch.object(OpenTofuConfigurator, 'cloud_name', create=True) 138 | validator.infra_configurator = OpenTofuConfigurator 139 | 140 | mock_run_tests = mocker.patch.object(SuiteRunner, 'run_tests') 141 | 142 | validator.run_tests_in_all_instances(self.test_instances) 143 | 144 | mock_run_tests.assert_called_once_with(self.test_config["test_suites"], 145 | validator.config["output_file"], 146 | self.test_config["test_filter"], 147 | self.test_config["include_markers"]) 148 | 149 | def test_destroy_infrastructure(self, mocker, validator): 150 | mock_destroy_infra = mocker.patch.object(OpenTofuController, 'destroy_infra') 151 | validator.infra_controller = OpenTofuController 152 | validator.config["debug"] = False 153 | 154 | mock_os_remove = mocker.patch('os.remove') 155 | 156 | validator.cleanup() 157 | 158 | mock_destroy_infra.assert_called_once() 159 | 160 | assert mock_os_remove.call_args_list == [ 161 | mocker.call(validator.config['ssh_identity_file']), 162 | mocker.call(validator.config['ssh_pub_key_file']), 163 | mocker.call(validator.config['ssh_config_file']), 164 | mocker.call(validator.config['instances_json']) 165 | ] 166 | -------------------------------------------------------------------------------- /test/test_reporter.py: 
import pytest

from result.reporter import Reporter


class TestReporter:
    """Unit tests for result.reporter.Reporter."""

    test_junit_report_path = '/test/path/to/junit/report.xml'

    @pytest.fixture
    def reporter(self):
        return Reporter(self.test_junit_report_path)

    def test_generate_html_report(self, mocker, reporter):
        """The HTML report is produced by shelling out to junit2html."""
        destination = 'test/path/to/report.html'
        os_system_mock = mocker.patch('os.system')
        print_mock = mocker.patch('builtins.print')

        reporter.generate_html_report(destination)

        os_system_mock.assert_called_once_with(f'junit2html {self.test_junit_report_path} '
                                               f'--report-matrix {destination}')
        print_mock.assert_called_once_with(f'HTML report generated: {destination}')
# --- end of test/test_reporter.py; next file: test/test_suite_runner.py ---
import pytest

from test_suite.suite_runner import SuiteRunner


class TestSuiteRunner:
    """Unit tests for test_suite.suite_runner.SuiteRunner."""

    test_cloud_provider = 'aws'
    test_instances = ['test instance 1', 'test instance 2']
    test_ssh_config = '/path/to/ssh_config'
    test_output_filepath = 'test/output/filepath.xml'
    test_test_suite_paths = ['test_path_1', 'test_path_2']
    test_filter = 'test_test_name'
    test_marker = 'pub'
    test_connection = 'paramiko'

    @pytest.fixture
    def suite_runner(self):
        return SuiteRunner(self.test_cloud_provider,
                           self.test_instances,
                           self.test_ssh_config)

    def test_run_tests(self, mocker, suite_runner):
        """run_tests() removes a stale report, composes the pytest command
        and executes it via os.system."""
        # Arrange
        output_filepath = '/test/output/filepath'
        composed_command = 'test composed command'

        compose_mock = mocker.MagicMock(return_value=composed_command)
        suite_runner.compose_pytest_command = compose_mock

        path_exists_mock = mocker.patch('os.path.exists', return_value=True)
        os_remove_mock = mocker.patch('os.remove')
        os_system_mock = mocker.patch('os.system')

        # Act
        suite_runner.run_tests(self.test_test_suite_paths,
                               output_filepath,
                               self.test_filter,
                               self.test_marker)

        # Assert: the pre-existing output file was detected and removed.
        path_exists_mock.assert_called_once_with(output_filepath)
        os_remove_mock.assert_called_once_with(output_filepath)

        os_system_mock.assert_called_once_with(composed_command)
        compose_mock.assert_called_once_with(self.test_test_suite_paths,
                                             output_filepath,
                                             self.test_filter,
                                             self.test_marker)

    # Expected command strings are built with f-strings over the class-level
    # attributes above (evaluated while the class body executes).
    @pytest.mark.parametrize(
        'test_test_suites, test_filter, test_marker, test_debug, test_parallel, expected_command_string',
        [(None, None, None, False, False,
          'pytest path1 path2 --hosts=user1@host1,user2@host2 '
          f'--connection={test_connection} '
          f'--ssh-config {test_ssh_config} --junit-xml {test_output_filepath} '
          f'--html {test_output_filepath.replace("xml", "html")} '
          f'--self-contained-html '
          f'--json-report --json-report-file={test_output_filepath.replace("xml", "json")}'),
         (None, test_filter, test_marker, False, False,
          'pytest path1 path2 --hosts=user1@host1,user2@host2 '
          f'--connection={test_connection} '
          f'--ssh-config {test_ssh_config} --junit-xml {test_output_filepath} '
          f'--html {test_output_filepath.replace("xml", "html")} '
          f'--self-contained-html '
          f'--json-report --json-report-file={test_output_filepath.replace("xml", "json")} '
          f'-k "{test_filter}" '
          f'-m "{test_marker}"'),
         (None, None, None, False, True,
          'pytest path1 path2 --hosts=user1@host1,user2@host2 '
          f'--connection={test_connection} '
          f'--ssh-config {test_ssh_config} --junit-xml {test_output_filepath} '
          f'--html {test_output_filepath.replace("xml", "html")} '
          f'--self-contained-html '
          f'--json-report --json-report-file={test_output_filepath.replace("xml", "json")} '
          f'--numprocesses={len(test_instances)} --maxprocesses=162 '
          '--only-rerun="socket.timeout|refused|ConnectionResetError|TimeoutError|SSHException|NoValidConnectionsError'
          '|Error while installing Development tools group" '
          '--reruns 3 --reruns-delay 5'),
         (test_test_suite_paths, None, None, True, True,
          'pytest test_path_1 test_path_2 --hosts=user1@host1,user2@host2 '
          f'--connection={test_connection} '
          f'--ssh-config {test_ssh_config} --junit-xml {test_output_filepath} '
          f'--html {test_output_filepath.replace("xml", "html")} '
          f'--self-contained-html '
          f'--json-report --json-report-file={test_output_filepath.replace("xml", "json")} '
          f'--numprocesses={len(test_instances)} --maxprocesses=162 '
          '--only-rerun="socket.timeout|refused|ConnectionResetError|TimeoutError|SSHException|NoValidConnectionsError'
          '|Error while installing Development tools group" '
          '--reruns 3 --reruns-delay 5 '
          '-v')]
    )
    def test_compose_pytest_command(self,
                                    mocker,
                                    suite_runner,
                                    test_test_suites,
                                    test_filter,
                                    test_marker,
                                    test_debug,
                                    test_parallel,
                                    expected_command_string):
        """Each combination of debug/parallel/filter/marker/suite inputs must
        yield exactly the expected pytest invocation string."""
        # Arrange
        fake_hosts = 'user1@host1,user2@host2'
        fake_suite_paths = ['path1', 'path2']

        suite_runner.debug = test_debug
        suite_runner.parallel = test_parallel

        hosts_mock = mocker.MagicMock(return_value=fake_hosts)
        suite_runner.get_all_instances_hosts_with_users = hosts_mock

        default_paths_mock = mocker.MagicMock(return_value=fake_suite_paths)
        suite_runner.get_default_test_suite_paths = default_paths_mock

        # Act, Assert
        assert suite_runner.compose_pytest_command(test_test_suites,
                                                   self.test_output_filepath,
                                                   test_filter,
                                                   test_marker) == expected_command_string

        hosts_mock.assert_called_once()

    @pytest.mark.parametrize(
        'test_instances, expected_hosts',
        [(dict(instance_1={'username': 'user1', 'address': 'host1'}), 'user1@host1'),
         (dict(instance_1={'username': 'user1', 'address': 'host1'},
               instance_2={'username': 'user2', 'address': 'host2'},
               instance_3={'username': 'user3', 'address': 'host3'}), 'user1@host1,user2@host2,user3@host3')]
    )
    def test_get_all_instances_hosts_with_users(self, suite_runner, test_instances, expected_hosts):
        """Instances are rendered as a comma-joined user@address list."""
        suite_runner.instances = test_instances

        assert suite_runner.get_all_instances_hosts_with_users() == expected_hosts

    @pytest.mark.parametrize(
        'test_instances, exception',
        [(dict(instance_1={'wrong_key_for_username': 'user1', 'address': 'host1'}),
          pytest.raises(KeyError))]
    )
    def test_get_all_instances_hosts_with_users_exception(self, suite_runner, test_instances, exception):
        """A malformed instance dict (missing 'username') raises KeyError."""
        suite_runner.instances = test_instances

        with exception:
            suite_runner.get_all_instances_hosts_with_users()

    @pytest.mark.parametrize(
        'test_cloud_provider, expected_suite_paths',
        [('other', ['generic/test_generic.py']),
         ('aws', ['generic/test_generic.py', 'cloud/test_aws.py'])],
    )
    def test_get_default_test_suite_paths(self,
                                          mocker,
                                          suite_runner,
                                          test_cloud_provider,
                                          expected_suite_paths):
        """The generic suite always runs; a cloud-specific suite is appended
        for known providers."""
        suite_runner.cloud_provider = test_cloud_provider

        mocker.patch('os.path.dirname', return_value='')

        assert suite_runner.get_default_test_suite_paths() == expected_suite_paths
# --- end of test/test_suite_runner.py; next file: test_suite/conftest.py ---
import os
import re
import time
import pytest 6 | import requests 7 | from packaging import version 8 | from py.xml import html 9 | from pytest_html import extras 10 | from requests.adapters import HTTPAdapter 11 | from requests.packages.urllib3.util.retry import Retry 12 | from test_suite.generic import helpers 13 | 14 | from lib import test_lib, aws_lib 15 | 16 | 17 | def __get_host_info(host): 18 | host_info = {} 19 | host_info['distro'] = host.system_info.distribution 20 | host_info['version'] = version.parse(host.system_info.release) 21 | host_info['distro_version'] = f'{host_info["distro"]}{host_info["version"]}' 22 | host_info['skip_message'] = f'This test doesn\'t apply to {host_info["distro_version"]}' 23 | return host_info 24 | 25 | 26 | def __parse_distro_version(distro_version): 27 | res = re.search(r'\d+(\.\d+)?', distro_version) 28 | if res: 29 | return version.parse(res.group(0)) 30 | 31 | 32 | def __check_wait_marker(request): 33 | # Check if test needs to wait before being run 34 | wait_marker = request.node.get_closest_marker('wait') 35 | 36 | if wait_marker: 37 | seconds_to_wait = int(wait_marker.args[0]) 38 | print(f'Waiting {seconds_to_wait} seconds before running test...') 39 | time.sleep(seconds_to_wait) 40 | 41 | 42 | def __check_exclude_on_marker(request, host_info): 43 | exclude_on_marker = request.node.get_closest_marker('exclude_on') 44 | if not exclude_on_marker: 45 | return 46 | 47 | exclude_on_marker_list = exclude_on_marker.args[0] 48 | 49 | if host_info['distro'] in exclude_on_marker_list or host_info['distro_version'] in exclude_on_marker_list: 50 | pytest.skip(host_info['skip_message']) 51 | 52 | # Check if the current distro_version matches any condition of a marker element 53 | # with a relational operator. 
If it does, do not run the test 54 | for item in exclude_on_marker_list: 55 | if host_info['distro'] in item: 56 | item_distro_version = __parse_distro_version(item) 57 | 58 | if item[0] == '<' and host_info['version'] < item_distro_version: 59 | pytest.skip(host_info['skip_message']) 60 | 61 | if item[0] == '>' and host_info['version'] > item_distro_version: 62 | pytest.skip(host_info['skip_message']) 63 | 64 | if item[1] == '=' and host_info['version'] == item_distro_version: 65 | pytest.skip(host_info['skip_message']) 66 | 67 | 68 | def __check_run_on_marker(request, host_info): 69 | run_on_marker = request.node.get_closest_marker('run_on') 70 | if not run_on_marker: 71 | pytest.fail('All test cases have to be marked with the "run_on" marker. Check README.md for more information.') 72 | 73 | run_on_marker_list = run_on_marker.args[0] 74 | 75 | if host_info['distro'] in run_on_marker_list or \ 76 | host_info['distro_version'] in run_on_marker_list or \ 77 | 'all' in run_on_marker_list: 78 | return 79 | 80 | # Check if the current distro_version matches at least one condition of a marker element 81 | # with a relational operator. 
If no element matches, we do not run the test 82 | for item in run_on_marker_list: 83 | if host_info['distro'] in item: 84 | item_distro_version = __parse_distro_version(item) 85 | 86 | if item[0] == '<' and host_info['version'] < item_distro_version: 87 | return 88 | 89 | if item[0] == '>' and host_info['version'] > item_distro_version: 90 | return 91 | 92 | if item[1] == '=' and host_info['version'] == item_distro_version: 93 | return 94 | 95 | pytest.skip(host_info['skip_message']) 96 | 97 | 98 | def __check_jira_skip_marker(request): 99 | jira_skip_marker = request.node.get_closest_marker('jira_skip') 100 | if not jira_skip_marker: 101 | return 102 | 103 | jira_skip_marker_list = jira_skip_marker.args[0] 104 | 105 | s = requests.Session() 106 | retries = Retry(total=3, backoff_factor=3) 107 | s.mount('https://issues.redhat.com', HTTPAdapter(max_retries=retries)) 108 | 109 | JIRA_PAT = os.getenv('JIRA_PAT') 110 | if not JIRA_PAT: 111 | exit('JIRA_PAT was not found') 112 | 113 | headers = {'Authorization': f'Bearer {os.getenv("JIRA_PAT")}'} 114 | endpoint_base = 'https://issues.redhat.com/rest/api/2/issue/' 115 | 116 | for ticket_id in jira_skip_marker_list: 117 | endpoint = endpoint_base + ticket_id 118 | res = s.get(endpoint, headers=headers) 119 | 120 | if res.status_code != 200: 121 | print(f'ERROR: (JIRA API) - Could not check Jira ticket {ticket_id}\n' 122 | f'API request error code {res.status_code}\n' 123 | 'Running test as the status cannot be checked') 124 | 125 | status = res.json()['fields']['status']['name'] 126 | if status != 'Closed': 127 | pytest.skip(f'Test skipped because Jira ticket {ticket_id} is not Closed yet') 128 | else: 129 | print( 130 | f'WARNING: Jira ticket {ticket_id} is already closed. 
Please remove it from the marker and put it in the docstring') 131 | 132 | 133 | @pytest.fixture(autouse=True, scope='function') 134 | def check_markers(host, request): 135 | host_info = __get_host_info(host) 136 | 137 | __check_wait_marker(request) 138 | __check_exclude_on_marker(request, host_info) 139 | __check_run_on_marker(request, host_info) 140 | __check_jira_skip_marker(request) 141 | 142 | 143 | @pytest.fixture 144 | def rhel_sap_only(host): 145 | if not test_lib.is_rhel_sap(host): 146 | pytest.skip('Image is not SAP RHEL') 147 | 148 | 149 | @pytest.fixture 150 | def rhel_high_availability_only(host): 151 | if not test_lib.is_rhel_high_availability(host): 152 | pytest.skip('Image is not HA (High Availability)') 153 | 154 | 155 | @pytest.fixture 156 | def rhel_atomic_only(host): 157 | if not test_lib.is_rhel_atomic_host(host): 158 | pytest.skip('Image is not atomic RHEL') 159 | 160 | 161 | @pytest.fixture 162 | def rhel_aws_marketplace_only(host, instance_data): 163 | # Check if the image is AWS Stratosphere. If so, skip the test. 
164 | if instance_data['cloud'] == 'aws' and aws_lib.is_rhel_aws_stratosphere(host): 165 | pytest.skip('Not applicable to RHEL AWS Stratosphere images.') 166 | 167 | 168 | @pytest.fixture(scope='function', autouse=True) 169 | def instance_data(host): 170 | values_to_find = [host.backend.hostname] 171 | values_to_find.extend(host.addr(host.backend.hostname).ipv4_addresses) 172 | 173 | return helpers.__get_instance_data_from_json( 174 | key_to_find='address', values_to_find=values_to_find, path=helpers.INSTANCES_JSON_PATH 175 | ) 176 | 177 | 178 | @pytest.fixture(autouse=True) 179 | def html_report_links(extra, host, instance_data): 180 | extra.append(extras.json(instance_data, 'Instance JSON')) 181 | 182 | link_name = f'{host.system_info.distribution}-{host.system_info.release}' 183 | extra.append(extras.json(vars(host.system_info)['sysinfo'], name=link_name)) 184 | 185 | 186 | def pytest_configure(config): 187 | pytest.markers_used = config.getoption('-m') 188 | 189 | 190 | def pytest_html_report_title(report): 191 | report.title = 'Cloud Image Validation Report' 192 | 193 | 194 | def pytest_html_results_summary(prefix, summary, postfix): 195 | prefix.extend([html.a('GitHub: https://github.com/osbuild/cloud-image-val', 196 | href='https://github.com/osbuild/cloud-image-val')]) 197 | 198 | if pytest.markers_used: 199 | postfix.extend([html.h4(f'Markers used: {pytest.markers_used}')]) 200 | 201 | 202 | def pytest_html_results_table_header(cells): 203 | del cells[1] 204 | 205 | cells.insert(1, html.th('Test_Case', class_='sortable', **{'data-column-type': 'test_case'})) 206 | cells.insert(2, html.th('Description', **{'data-column-type': 'description'})) 207 | cells.insert(3, html.th('Image', class_='sortable', **{'data-column-type': 'image'})) 208 | 209 | 210 | def pytest_html_results_table_row(report, cells): 211 | del cells[1] 212 | 213 | cells.insert(1, html.td(getattr(report, 'test_case', ''))) 214 | cells.insert(2, html.td(getattr(report, 'description', ''), 215 
| style='white-space:pre-line; word-wrap:break-word')) 216 | cells.insert(3, html.td(getattr(report, 'image', ''))) 217 | 218 | 219 | def pytest_html_duration_format(duration): 220 | """ 221 | Format the duration of tests in the HTML report. 222 | The duration is given in seconds as a float. 223 | You can customize the output format here. 224 | """ 225 | if duration < 1: 226 | return f"{duration * 1000:.2f} ms" # Display in milliseconds 227 | else: 228 | return f"{duration:.2f} s" # Display in seconds 229 | 230 | 231 | @pytest.hookimpl(hookwrapper=True) 232 | def pytest_runtest_makereport(item, call): 233 | outcome = yield 234 | report = outcome.get_result() 235 | 236 | # Fill 'Test Case' column 237 | report.test_case = f'{str(item.parent.name)}::{str(item.function.__name__)}' 238 | 239 | # Fill 'Description' column 240 | description_text = __truncate_text(str(item.function.__doc__), 120) 241 | report.description = description_text 242 | 243 | # Fill 'Image' column 244 | if 'instance_data' in item.funcargs: 245 | instance = item.funcargs['instance_data'] 246 | if instance: 247 | image_ref = instance['image'] 248 | report.image = str(image_ref) 249 | 250 | 251 | def __truncate_text(text, max_chars): 252 | if len(text) > max_chars: 253 | text = f'{text[:max_chars]} [...]' 254 | return text 255 | -------------------------------------------------------------------------------- /test_suite/generic/helpers.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | from lib import test_lib 4 | 5 | INSTANCES_JSON_PATH = os.environ['CIV_INSTANCES_JSON'] 6 | 7 | 8 | def __get_instance_data_from_json(key_to_find, values_to_find, path=INSTANCES_JSON_PATH): 9 | with open(path, 'r') as f: 10 | instances_json_data = json.load(f) 11 | for instance in instances_json_data.values(): 12 | if key_to_find in instance.keys() and instance[key_to_find] in values_to_find: 13 | return instance 14 | 15 | 16 | def 
check_avc_denials(host): 17 | command_to_run = 'x=$(ausearch -m avc 2>&1 &); echo $x' 18 | result = test_lib.print_host_command_output(host, 19 | command_to_run, 20 | capture_result=True) 21 | 22 | no_avc_denials_found = 'no matches' in result.stdout 23 | 24 | # ignore avc denial for irqbalance 25 | # remove when RHEL-78630 is fixed 26 | if 'irqbalance' in result.stdout: 27 | no_avc_denials_found = True 28 | 29 | assert no_avc_denials_found, 'There should not be any avc denials (selinux)' 30 | -------------------------------------------------------------------------------- /test_suite/generic/test_markers.py: -------------------------------------------------------------------------------- 1 | """ 2 | The objective of this file is to facilitate testing of our markers. 3 | Every time new markers are added, or the code in conftest.py is changed, 4 | this testsuite should be run to make sure the markers work correctly. 5 | """ 6 | from datetime import datetime 7 | import pytest 8 | 9 | 10 | @pytest.mark.run_on(['all']) 11 | def test_markers_all_no_wait(): 12 | now = datetime.now() 13 | 14 | current_time = now.strftime("%H:%M:%S") 15 | print("Current Time =", current_time) 16 | 17 | 18 | @pytest.mark.wait(120) 19 | @pytest.mark.run_on(['all']) 20 | def test_markers_all_do_wait(): 21 | now = datetime.now() 22 | 23 | current_time = now.strftime("%H:%M:%S") 24 | print("Current Time =", current_time) 25 | 26 | 27 | @pytest.mark.run_on(["rhel"]) 28 | def test_marker_only_rhel(): 29 | print("rhel!") 30 | 31 | 32 | @pytest.mark.run_on(["fedora"]) 33 | def test_marker_only_fedora(): 34 | print("fedora!") 35 | 36 | 37 | @pytest.mark.run_on(["centos"]) 38 | def test_marker_only_centos(): 39 | print("centos!") 40 | 41 | 42 | @pytest.mark.run_on(["rhel8.4"]) 43 | def test_marker_only_rhel_8_4(): 44 | print("rhel8.4!") 45 | 46 | 47 | @pytest.mark.run_on(["rhel8.4", "rhel8.7", "rhel8.8"]) 48 | def test_marker_only_rhel_8_4_8_7_8_8(): 49 | print("rhel 8.4, 8.7, 8.8!") 50 | 51 | 52 | 
@pytest.mark.run_on(["fedora36"]) 53 | def test_marker_only_fedora36(): 54 | print("fedora36!") 55 | 56 | 57 | @pytest.mark.run_on(["centos8"]) 58 | def test_marker_only_centos8(): 59 | print("centos8!") 60 | 61 | 62 | @pytest.mark.run_on([">rhel8.7"]) 63 | def test_marker_only_bigger_rhel8_7(): 64 | print(">rhel8.7!") 65 | 66 | 67 | @pytest.mark.run_on(["=rhel8.8", "rhel8.4"]) 73 | def test_marker_bigger_or_equal_rhel8_8_and_rhel8_4(): 74 | print(">=rhel8.8 and rhel8.4!") 75 | 76 | 77 | @pytest.mark.run_on([">rhel9.1", "fedora"]) 78 | def test_marker_bigger_rhel9_1_and_fedora(): 79 | print(">rhel9.1 and fedora!") 80 | 81 | 82 | @pytest.mark.run_on(["all"]) 83 | @pytest.mark.exclude_on([" {self.hostname_before_reboot_file}') 12 | host.run_test(f'grubby --update-kernel=ALL --args="{self.kmemleak_arg}"') 13 | 14 | @pytest.mark.order(101) 15 | @pytest.mark.run_on(['all']) 16 | def test_launch_reboot(self, host, instance_data): 17 | self.setup_before_reboot(host) 18 | test_lib.reboot_host(host) 19 | 20 | @pytest.mark.order(102) 21 | @pytest.mark.run_on(['all']) 22 | def test_reboot_time(self, host, instance_data): 23 | """ 24 | Check reboot time after 1st init. 25 | BugZilla 1776710, 1446698, 1446688 26 | """ 27 | if instance_data['cloud'] == 'azure': 28 | max_boot_time_seconds = 60.0 29 | else: 30 | max_boot_time_seconds = 40.0 31 | 32 | boot_time = test_lib.get_host_last_boot_time(host) 33 | 34 | assert boot_time < max_boot_time_seconds, \ 35 | f'Reboot took more than {max_boot_time_seconds} sec.' 
36 | 37 | @pytest.mark.order(103) 38 | @pytest.mark.run_on(['all']) 39 | def test_reboot_keeps_current_hostname(self, host): 40 | """ 41 | Check that reboot doesn't change the hostname 42 | """ 43 | hostname_after_reboot = host.check_output('hostname') 44 | 45 | with host.sudo(): 46 | assert host.file(self.hostname_before_reboot_file).contains(hostname_after_reboot), \ 47 | 'Instance hostname changed after reboot' 48 | 49 | # TODO: Review failure in RHEL 7.9, it may be related to a grubby bug 50 | @pytest.mark.order(104) 51 | @pytest.mark.run_on(['all']) 52 | def test_reboot_grubby(self, host): 53 | """ 54 | Check that user can update boot parameter using grubby tool 55 | """ 56 | file_to_check = '/proc/cmdline' 57 | 58 | with host.sudo(): 59 | assert host.file(file_to_check).contains(self.kmemleak_arg), \ 60 | f'Expected "{self.kmemleak_arg}" in "{file_to_check}"' 61 | 62 | host.run_test(f'grubby --update-kernel=ALL --remove-args="{self.kmemleak_arg}"') 63 | 64 | @pytest.mark.run_on(['all']) 65 | def test_first_boot_time(self, host, instance_data): 66 | if instance_data['cloud'] == 'azure': 67 | max_boot_time_aws = 120 68 | elif host.system_info.arch == 'aarch64': 69 | max_boot_time_aws = 70 70 | else: 71 | max_boot_time_aws = 60 72 | 73 | boot_time = test_lib.get_host_last_boot_time(host) 74 | 75 | assert boot_time < max_boot_time_aws, f'First boot took more than {max_boot_time_aws} seconds' 76 | -------------------------------------------------------------------------------- /test_suite/package/otel_package/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/osbuild/cloud-image-val/7682fc950842c962d066a637be9ac6ee4b83d9f2/test_suite/package/otel_package/__init__.py -------------------------------------------------------------------------------- /test_suite/package/otel_package/fixtures.py: -------------------------------------------------------------------------------- 1 | import json 2 
import re
import time
import pytest
from lib import console_lib
from lib import test_lib
from test_suite.generic import helpers


@pytest.fixture(scope='class')
def initialize_variables(request, host):
    """Resolve this instance's metadata from instances.json and stash its
    id/address/region on the test class for the other fixtures to use."""
    self = request.node.cls
    values_to_find = [host.backend.hostname] + host.addr(host.backend.hostname).ipv4_addresses
    instance_data = helpers.__get_instance_data_from_json(key_to_find='address', values_to_find=values_to_find)
    self.instance_id = instance_data['instance_id']
    self.instance_address = instance_data['address']
    # Dropping the trailing AZ letter yields the region (e.g. us-east-1a -> us-east-1).
    self.instance_region = instance_data['availability_zone'][:-1]


@pytest.fixture(scope='function')
def check_instance_status(request, host, timeout_seconds=300):
    """Poll `aws ec2 describe-instance-status` until both the instance and
    system status are 'ok'; raise TimeoutError after *timeout_seconds*."""
    self = request.node.cls
    command_to_run = [
        'aws', 'ec2', 'describe-instance-status',
        '--instance-ids', self.instance_id, '--region', self.instance_region
    ]

    start_time = time.time()
    while True:
        if time.time() - start_time > timeout_seconds:
            raise TimeoutError("Timeout exceeded while waiting for instance status")

        command_output = host.backend.run_local(" ".join(command_to_run)).stdout
        cmd_output = json.loads(command_output)

        # ROBUSTNESS: shortly after launch the API can return an empty
        # InstanceStatuses list; treat that as "not ready yet" instead of
        # crashing with IndexError.
        statuses = cmd_output.get("InstanceStatuses")
        if statuses:
            instance_status = statuses[0]["InstanceStatus"]["Status"]
            system_status = statuses[0]["SystemStatus"]["Status"]

            if instance_status == "ok" and system_status == "ok":
                break

        print("Instance status is not 'passed' yet. Waiting...")
        time.sleep(5)


@pytest.fixture(scope='function')
def modify_iam_role(request, host):
    """Attach the CloudWatch agent IAM instance profile to the instance so
    the OTEL collector can export logs."""
    self = request.node.cls
    iam_role_name = "CloudWatchAgentServerRole_2"

    command_to_run = [
        'aws', 'ec2', 'associate-iam-instance-profile',
        '--instance-id', self.instance_id,
        '--region', self.instance_region,
        '--iam-instance-profile', 'Name="{}"'.format(iam_role_name)
    ]
    modify_iam_role_cmd = ' '.join(command_to_run)
    # BUGFIX: corrected typo in the assertion message ('faild' -> 'failed').
    assert host.backend.run_local(modify_iam_role_cmd), 'failed to update iam role'


@pytest.fixture(scope='function')
def setup_conf(host):
    """Drop a CloudWatch-export pipeline config for the OTEL collector."""
    file_path = '/etc/opentelemetry-collector/configs/10-cloudwatch-export.yaml'
    file_content = """
---
exporters:
  awscloudwatchlogs:
    log_group_name: "testing-logs-emf"
    log_stream_name: "testing-integrations-stream-emf"

service:
  pipelines:
    logs:
      receivers: [journald]
      exporters: [awscloudwatchlogs]

"""
    with host.sudo():
        host.run(f"echo '{file_content}' > {file_path}")


@pytest.fixture(scope='function')
def install_packages(request, host):
    """Show the installed opentelemetry packages and register a finalizer
    that removes the package and verifies logging stops afterwards."""
    self = request.node.cls
    with host.sudo():
        test_lib.print_host_command_output(host, "rpm -qa | grep opentelemetry*")

    def finalizer():
        console_lib.print_divider(f'Removing the package {self.package_name}')
        # NOTE(review): host.run(...) returns a result object that is always
        # truthy, so this assert cannot fail — the real check is the rpm -q
        # output below. Confirm whether `.succeeded` was intended.
        assert host.run(f'sudo yum remove -y {self.package_name}')
        cmd_output = host.run(f'rpm -q {self.package_name}').stdout
        assert "package redhat-opentelemetry-collector-main is not installed" in cmd_output
        assert host.run(f'ssh {self.instance_address}').failed
        console_lib.print_divider("Verify logs don't appear")
        log_output = self.check_aws_cli_logs(self, host, self.instance_region).stdout
        assert re.search(r"invalid\s+user", log_output), "Expected 'invalid user' not found in logs"
    request.addfinalizer(finalizer)

@pytest.fixture(scope='function') 101 | def start_service(request, host): 102 | self = request.node.cls 103 | start_service = (f'systemctl start {self.service_name}') 104 | enable_service = (f'systemctl enable {self.service_name}') 105 | is_active = (f'systemctl is-active {self.service_name}') 106 | 107 | with host.sudo(): 108 | assert host.run(start_service).succeeded, (f'Failed to start the service {self.service_name}') 109 | assert host.run(enable_service).succeeded, (f'Failed to enable the service {self.service_name}') 110 | assert host.run(is_active).succeeded, (f'Service is not active {self.service_name}') 111 | -------------------------------------------------------------------------------- /test_suite/package/otel_package/test_otel.py: -------------------------------------------------------------------------------- 1 | import time 2 | import re 3 | import pytest 4 | from lib import console_lib 5 | from test_suite.generic import helpers 6 | from test_suite.package.otel_package.fixtures import ( 7 | initialize_variables, check_instance_status, 8 | install_packages, setup_conf, modify_iam_role, start_service 9 | ) 10 | 11 | 12 | @pytest.mark.package 13 | @pytest.mark.run_on(['>=rhel9.5']) 14 | @pytest.mark.usefixtures( 15 | initialize_variables.__name__ 16 | ) 17 | class TestOtel: 18 | package_name = 'redhat-opentelemetry-collector-main' 19 | service_name = 'opentelemetry-collector.service' 20 | 21 | def check_aws_cli_logs(self, host, region): 22 | command_to_run = [ 23 | 'export', f'AWS_REGION={self.instance_region}', "&&", 24 | 'aws', 'logs', 'filter-log-events', 25 | '--log-stream-names', '"testing-integrations-stream-emf"', 26 | '--filter-pattern', '"invalid"', 27 | '--log-group-name', '"testing-logs-emf"' 28 | ] 29 | run_aws_cli_cmd = ' '.join(command_to_run) 30 | return host.backend.run_local(run_aws_cli_cmd) 31 | 32 | @pytest.mark.usefixtures( 33 | check_instance_status.__name__, 34 | setup_conf.__name__, 35 | install_packages.__name__, 36 | 
modify_iam_role.__name__, 37 | start_service.__name__ 38 | ) 39 | def test_otel(self, host): 40 | """ 41 | Verify basic funstionality for OpenTelemetry (OTEL) package: 42 | - Install the package. 43 | - Start the service. 44 | - Modify IAM role. 45 | - Make a failure ssh connection to the instance. 46 | - Check for error messages in the ssh logs cotaining within the instance. 47 | - Check the error logs with AWS CLI and compare it to the logs in "/var/log/secure". 48 | - Check there are no AVC denials. 49 | Finalize: 50 | - Remove the package from the instance and verify it's not present anymore. 51 | - Try a failure ssh again and check that the logs don't appear. 52 | """ 53 | with host.sudo(): 54 | console_lib.print_divider("Connect to the instance without a key in order to fail") 55 | result = host.backend.run_local(f'ssh -o BatchMode=yes {self.instance_address}').stderr 56 | assert "Host key verification failed" in result or "Permission denied" in result 57 | 58 | console_lib.print_divider("Check for error logs in the instance logs") 59 | assert host.run('echo "" > /var/log/secure') 60 | 61 | for attempt in range(3): 62 | try: 63 | host.backend.run_local(f'ssh -o BatchMode=yes {self.instance_address}') 64 | invalid = host.run('cat /var/log/secure | grep "invalid user"').stdout 65 | assert "invalid" in invalid, ('no logs of ssh connection failure exist') 66 | except AssertionError as e: 67 | print(f"AssertionError: {e}") 68 | time.sleep(15) 69 | 70 | console_lib.print_divider("Check for error logs in aws cli logs") 71 | log_output = self.check_aws_cli_logs(host, self.instance_region).stdout 72 | assert re.search(r"invalid\s+user", log_output), "Expected 'invalid user' not found in logs" 73 | 74 | helpers.check_avc_denials(host) 75 | -------------------------------------------------------------------------------- /test_suite/package/test_awscli2.py: -------------------------------------------------------------------------------- 1 | import json 2 | import pytest 

from lib import test_lib
from packaging import version


@pytest.mark.package
@pytest.mark.run_on(['>=rhel9.5'])
class TestsAwsCli2:
    @pytest.fixture(scope='module', autouse=True)
    def import_aws_credentials(self, host):
        """Generate temporary AWS credentials locally and configure them on the host."""
        token_duration = 900  # This is the minimum accepted value in seconds
        # Generate temporary credentials for this test.
        civ_local_command_to_run = f'aws sts get-session-token --duration-seconds {token_duration} --output json'

        result = host.backend.run_local(civ_local_command_to_run)
        assert result.succeeded, \
            f'Failed to obtain temporary AWS credentials. Error: {result.stderr}'

        temporary_creds_json = json.loads(result.stdout)['Credentials']

        temporary_auth_config = {
            'aws_access_key_id': temporary_creds_json['AccessKeyId'],
            'aws_secret_access_key': temporary_creds_json['SecretAccessKey'],
            'aws_session_token': temporary_creds_json['SessionToken'],
        }
        # Export env vars from the local command output, from the host
        for key, value in temporary_auth_config.items():
            result = host.run(f'aws configure set {key} {value}')
            assert result.succeeded, \
                f'Could not configure temporary AWS credentials. Error: {result.stderr}'

    def test_awscli2_version(self, host):
        """Verify the installed awscli2 version matches the expected one for this RHEL major."""
        expected_version_rhel_9 = '2.15.31'
        expected_version_rhel_10 = '2.22.9'

        if version.parse(host.system_info.release).major == 10:
            expected_version = expected_version_rhel_10
        else:
            expected_version = expected_version_rhel_9

        result = test_lib.print_host_command_output(host,
                                                    'aws --version',
                                                    capture_result=True,
                                                    use_sudo=False)

        assert result.succeeded, 'Failed to get AWS version.'
        # Fixed typo in the failure message ("aswcli2" -> "awscli2").
        assert f'aws-cli/{expected_version}' in result.stdout, 'Unexpected awscli2 version.'

    def test_awscli2_authentication(self, host):
        """Verify the configured credentials can authenticate against AWS STS."""
        result = host.run('aws sts get-caller-identity')
        assert result.succeeded, \
            f'Failed to get AWS identity. Error: {result.stderr}'

        identity_found = '"UserId":' in result.stdout and \
                         '"Account":' in result.stdout and \
                         '"Arn":' in result.stdout

        assert identity_found, 'Unexpected identity output.'
        print('Authentication successful!')

    def test_awscli2_basic_query(self, host, instance_data):
        """
        Verify information about the instance where this test is being executed from
        """
        region = instance_data['availability_zone'][:-1]

        # Run a query to get the instance IDs of all running instances
        command_to_run = (f'aws ec2 describe-instances '
                          f'--region {region} '
                          f'--query "Reservations[].Instances[*].InstanceId"')

        result = test_lib.print_host_command_output(host,
                                                    command_to_run,
                                                    capture_result=True,
                                                    use_sudo=False)
        assert result.succeeded, f'Failed to query AWS instances. Error: {result.stderr}'

        # Search for our own instance ID in the output for a sanity check
        instance_id = instance_data['instance_id']
        assert instance_id in result.stdout, \
            f'Expected Instance ID {instance_id} not found in AWS query output.'
84 | -------------------------------------------------------------------------------- /test_suite/package/test_efs_utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import pytest 4 | 5 | from lib import test_lib, console_lib 6 | 7 | 8 | @pytest.mark.package 9 | @pytest.mark.run_on(['rhel9.4']) 10 | class TestsEfsUtils: 11 | mount_point = '/tmp/efs' 12 | 13 | def test_efs_utils(self, host, instance_data): 14 | """ 15 | NOTE: This test case assumes efs-utils is preinstalled in the AMI image 16 | 17 | Check basic functionality of EFS utils: 18 | - Create a mount point and mount the EFS file system to the RHEL instance. 19 | - Create a file on the mount point. 20 | - Automate mount on boot. 21 | - Checksum the file before reboot. 22 | - reboot and checksum that the file is correct. 23 | """ 24 | # We get 'file_system_dns_name' as instance metadata. See instance_data fixture in conftest.py 25 | file_system_dns_name = instance_data['efs_file_system_dns_name'] 26 | 27 | # Adding current timestamp to prevent I/O issues 28 | test_file = f'{self.mount_point}/testfile_{time.time()}' 29 | 30 | result = host.run(f'mkdir {self.mount_point}') 31 | with host.sudo(): 32 | assert result.succeeded, f'Could not create mount point directory. {result.stderr}' 33 | console_lib.print_divider(f'Mount point {self.mount_point} created.', 34 | upper=False, center_text=False) 35 | 36 | result = host.run(f'mount -t efs {file_system_dns_name} {self.mount_point}') 37 | assert result.succeeded, \ 38 | f'Failed to mount {file_system_dns_name} into {self.mount_point}. {result.stderr}' 39 | assert host.mount_point(self.mount_point).exists 40 | console_lib.print_divider(f'EFS file system {file_system_dns_name} successfully mounted.', 41 | upper=False, center_text=False) 42 | 43 | result = host.run(f'dd if=/dev/zero of={test_file} bs=3K count=1') 44 | assert result.succeeded, f'Failed to write the test file. 
{result.stderr}' 45 | console_lib.print_divider(host.check_output(f'ls -l {test_file}'), 46 | upper=False, center_text=False) 47 | console_lib.print_divider(f'Test file {test_file} successfully written.', 48 | upper=False, center_text=False) 49 | 50 | write_fstab_cmd = f'echo "{file_system_dns_name}:/ {self.mount_point} efs _netdev,tls 0 0" >> "/etc/fstab"' 51 | 52 | result = host.run(write_fstab_cmd) 53 | assert result.succeeded, f'Failed to update /etc/fstab. {result.stderr}' 54 | console_lib.print_divider('/etc/fstab updated successfully.', upper=False, center_text=False) 55 | 56 | checksum_before_reboot = host.file(test_file).md5sum 57 | 58 | test_lib.reboot_host(host) 59 | 60 | assert checksum_before_reboot == host.file(test_file).md5sum 61 | console_lib.print_divider(f'{test_file} checksum is the expected one!', center_text=False) 62 | -------------------------------------------------------------------------------- /test_suite/rhel_devel/ctc/test_ctc.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from lib import console_lib 3 | from test_suite.rhel_devel import run_cloudx_components_testing 4 | 5 | """ 6 | CTC (Comprehensive Tests Cycle) refers to the RHEL testing phase 7 | were we thoroughly test our components on a specified RHEL version. 8 | There is also CTC2 which is another round in a later stage of SDLC. 
9 | """ 10 | 11 | 12 | @pytest.mark.ctc 13 | @pytest.mark.run_on(['all']) 14 | class TestsComprehensiveTestsCycle: 15 | def test_components(self, host): 16 | console_lib.print_divider('Testing CloudX-owned components...') 17 | assert run_cloudx_components_testing.main() 18 | -------------------------------------------------------------------------------- /test_suite/rhel_devel/cut/test_cut.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from packaging import version 3 | from lib import test_lib, console_lib 4 | from test_suite.generic.test_generic import TestsSubscriptionManager as sub_man 5 | from test_suite.rhel_devel import run_cloudx_components_testing 6 | 7 | """ 8 | CUT (Components Upgrade Testing) refers to the RHEL testing phase 9 | were we test if our components are upgradable across major versions. 10 | Example: After upgrading from RHEL-9.6 to RHEL-10.0, make sure components work. 11 | """ 12 | 13 | 14 | @pytest.mark.cut 15 | class TestsComponentsUpgrade: 16 | @pytest.mark.run_on(['rhel9.6', 'rhel10.0']) 17 | def test_cut_rhel_90_to_rhel_100(self, host, instance_data): 18 | console_lib.print_divider('Testing components BEFORE major upgrade...') 19 | assert run_cloudx_components_testing.main() 20 | 21 | console_lib.print_divider('Registering system with subscription-manager...') 22 | sub_man_config = { 23 | "rhsmcertd.auto_registration": 1, 24 | "rhsm.manage_repos": 1, 25 | } 26 | for item, value in sub_man_config.items(): 27 | with host.sudo(): 28 | host.run_test(f'subscription-manager config --{item}={value}') 29 | 30 | sub_man.test_subscription_manager_auto(self, host, instance_data) 31 | 32 | console_lib.print_divider('Installing leapp package...') 33 | result = test_lib.print_host_command_output(host, 'dnf install leapp-upgrade-el9toel10 -y', capture_result=True) 34 | 35 | assert result.succeeded, 'Failed to install leapp-upgrade-el9toel10' 36 | 37 | # We will use the latest compose by 
defualt. 38 | # This can be manually changed in a CIV pull request for debugging purposes. 39 | compose_url = "http://download.devel.redhat.com/rhel-10/nightly/RHEL-10/latest-RHEL-10.0" 40 | 41 | basearch = host.system_info.arch 42 | 43 | console_lib.print_divider('Adding RHEL-10 repos...') 44 | repo_file_name = '/etc/yum.repos.d/rhel10.repo' 45 | rhel_10_repo_file = f""" 46 | [AppStream10] 47 | name=AppStream for RHEL-10 48 | baseurl={compose_url}/compose/AppStream/{basearch}/os/ 49 | enabled=0 50 | gpgcheck=0 51 | 52 | [BaseOS10] 53 | name=BaseOS for RHEL-10 54 | baseurl={compose_url}/compose/BaseOS/{basearch}/os/ 55 | enabled=0 56 | gpgcheck=0 57 | """ 58 | test_lib.print_host_command_output(host, f'echo "{rhel_10_repo_file}" > {repo_file_name}') 59 | 60 | console_lib.print_divider('Running leapp upgrade...') 61 | result = test_lib.print_host_command_output( 62 | host, 63 | 'leapp upgrade --no-rhsm --enablerepo AppStream10 --enablerepo BaseOS10', 64 | capture_result=True) 65 | 66 | if result.failed: 67 | reapp_report_file = '/var/log/leapp/leapp-report.txt' 68 | if host.file(reapp_report_file).exists: 69 | print('Leapp Report:\n', host.file(reapp_report_file).content_string) 70 | 71 | pytest.fail('RHEL major upgrade failed. Please check leapp-report.txt for more details.') 72 | 73 | console_lib.print_divider('Rebooting host...') 74 | # 15 minutes of timeout due to performing a major upgrade 75 | host = test_lib.reboot_host(host, max_timeout=900) 76 | 77 | assert version.parse(host.system_info.release).major == 10, \ 78 | 'Failed to upgrade from RHEL-9.6 to RHEL-10.0 even after reboot.' 
79 | 80 | console_lib.print_divider('Testing components AFTER major upgrade...') 81 | assert run_cloudx_components_testing.main() 82 | -------------------------------------------------------------------------------- /test_suite/rhel_devel/run_cloudx_components_testing.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from test_suite.suite_runner import SuiteRunner 4 | 5 | 6 | def main(): 7 | cloudx_components_test_suites = [ 8 | "test_suite/package/test_awscli2.py", 9 | "test_suite/package/otel_package/test_otel.py", 10 | ] 11 | 12 | # For this test, we asssume one instance has been deployed at a time. 13 | with open(os.environ['CIV_INSTANCES_JSON'], 'r') as f: 14 | inst = json.load(f) 15 | 16 | suite_runner = SuiteRunner(cloud_provider='aws', 17 | instances=inst, 18 | ssh_config=os.environ['CIV_SSH_CONFIG_FILE'], 19 | parallel=False, 20 | debug=True) 21 | 22 | status = suite_runner.run_tests(test_suite_paths=cloudx_components_test_suites, 23 | output_filepath=os.environ['CIV_OUTPUT_FILE']) 24 | 25 | return_code = status >> 8 26 | 27 | if return_code == 0: 28 | return True 29 | else: 30 | print("One or more components failed.") 31 | return False 32 | 33 | 34 | if __name__ == '__main__': 35 | main() 36 | -------------------------------------------------------------------------------- /test_suite/suite_runner.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | class SuiteRunner: 5 | max_processes = 162 # This is the maximum amount of images we have successfully tested in parallel (AWS) 6 | 7 | # Rerun failed tests in case ssh times out or connection is refused by host 8 | rerun_failing_tests_regex = '|'.join([ 9 | 'socket.timeout', 10 | 'refused', 11 | 'ConnectionResetError', 12 | 'TimeoutError', 13 | 'SSHException', 14 | 'NoValidConnectionsError', 15 | 'Error while installing Development tools group' 16 | ]) 17 | 18 | connection_backend = 
'paramiko' 19 | max_reruns = 3 20 | rerun_delay_sec = 5 21 | 22 | def __init__(self, 23 | cloud_provider, 24 | instances: dict, 25 | ssh_config: str, 26 | parallel=True, 27 | debug=False): 28 | self.cloud_provider = cloud_provider 29 | self.instances = instances 30 | self.ssh_config = ssh_config 31 | self.parallel = parallel 32 | self.debug = debug 33 | 34 | def run_tests(self, 35 | test_suite_paths, 36 | output_filepath, 37 | test_filter=None, 38 | include_markers=None): 39 | if os.path.exists(output_filepath): 40 | os.remove(output_filepath) 41 | 42 | pytest_composed_command = self.compose_pytest_command(test_suite_paths, 43 | output_filepath, 44 | test_filter, 45 | include_markers) 46 | 47 | if self.debug: 48 | print('Composed pytest command:') 49 | print(pytest_composed_command) 50 | 51 | return os.system(pytest_composed_command) 52 | 53 | def compose_pytest_command(self, 54 | test_suite_paths, 55 | output_filepath, 56 | test_filter=None, 57 | include_markers=None): 58 | all_hosts = self.get_all_instances_hosts_with_users() 59 | 60 | if not test_suite_paths: 61 | test_suite_paths = self.get_default_test_suite_paths() 62 | 63 | command_with_args = [ 64 | 'pytest', 65 | ' '.join(test_suite_paths), 66 | f'--hosts={all_hosts}', 67 | f'--connection={self.connection_backend}', 68 | f'--ssh-config {self.ssh_config}', 69 | f'--junit-xml {output_filepath}', 70 | f'--html {output_filepath.replace("xml", "html")}', 71 | '--self-contained-html', 72 | f'--json-report --json-report-file={output_filepath.replace("xml", "json")}' 73 | ] 74 | 75 | if test_filter: 76 | command_with_args.append(f'-k "{test_filter}"') 77 | 78 | if include_markers: 79 | command_with_args.append(f'-m "{include_markers}"') 80 | 81 | if self.parallel: 82 | command_with_args.append(f'--numprocesses={len(self.instances)}') 83 | command_with_args.append(f'--maxprocesses={self.max_processes}') 84 | 85 | command_with_args.append(f'--only-rerun="{self.rerun_failing_tests_regex}"') 86 | 
command_with_args.append(f'--reruns {self.max_reruns}') 87 | command_with_args.append(f'--reruns-delay {self.rerun_delay_sec}') 88 | 89 | if self.debug: 90 | command_with_args.append('-v') 91 | 92 | return ' '.join(command_with_args) 93 | 94 | def get_default_test_suite_paths(self): 95 | """ 96 | :return: A list of test suite file paths that will be used in case there are no test suites passed as argument 97 | """ 98 | test_suites_to_run = ['generic/test_generic.py'] 99 | 100 | if self.cloud_provider == 'aws': 101 | test_suites_to_run.append('cloud/test_aws.py') 102 | elif self.cloud_provider == 'azure': 103 | test_suites_to_run.append('cloud/test_azure.py') 104 | 105 | return [os.path.join(os.path.dirname(__file__), p) for p in test_suites_to_run] 106 | 107 | def get_all_instances_hosts_with_users(self): 108 | """ 109 | :return: A string with comma-separated items in the form of '@,@' 110 | """ 111 | return ','.join(['{0}@{1}'.format(inst['username'], inst['address']) for inst in self.instances.values()]) 112 | --------------------------------------------------------------------------------