├── .ansible-lint
├── .github
│   ├── dependabot.yml
│   ├── linters
│   │   ├── .gitleaks.toml
│   │   └── .markdown-lint.yml
│   └── workflows
│       ├── ansible-lint.yml
│       ├── linter.yml
│       ├── superlinter.yml
│       └── update-metadata.yml
├── .gitignore
├── .gitleaks.toml
├── Changes.md
├── LICENSE
├── Makefile
├── README.md
├── ansible.cfg
├── ansible
│   ├── ansible.cfg
│   ├── ansible_get_credentials.yml
│   ├── check_kubevirt_worker.yml
│   ├── deploy_kubevirt_worker.yml
│   ├── odf_clean_pvcs.yml
│   └── odf_fix_dataimportcrons.yml
├── common
│   ├── .ansible-lint
│   ├── .github
│   │   ├── dependabot.yml
│   │   ├── linters
│   │   │   ├── .gitleaks.toml
│   │   │   └── .markdown-lint.yml
│   │   └── workflows
│   │       ├── pattern-sh-ci.yml
│   │       └── superlinter.yml
│   ├── .gitignore
│   ├── .gitleaks.toml
│   ├── Changes.md
│   ├── LICENSE
│   ├── Makefile
│   ├── README.md
│   ├── requirements.yml
│   └── scripts
│       ├── deploy-pattern.sh
│       ├── determine-main-clustergroup.sh
│       ├── determine-pattern-name.sh
│       ├── determine-secretstore-backend.sh
│       ├── display-secrets-info.sh
│       ├── load-k8s-secrets.sh
│       ├── make-common-subtree.sh
│       ├── manage-secret-app.sh
│       ├── manage-secret-namespace.sh
│       ├── pattern-util.sh
│       ├── preview-all.sh
│       ├── preview.sh
│       ├── process-secrets.sh
│       ├── set-secret-backend.sh
│       ├── vault-utils.sh
│       └── write-token-kubeconfig.sh
├── diagrams
│   └── aeg-architecture.drawio
├── overrides
│   ├── values-aap-config-aeg.yaml
│   ├── values-egv-vms.yaml
│   └── values-odf-chart.yaml
├── pattern-metadata.yaml
├── pattern.sh
├── scripts
│   ├── ansible_get_credentials.sh
│   ├── ansible_load_controller.sh
│   ├── check_kubevirt_worker.sh
│   ├── deploy_kubevirt_worker.sh
│   ├── get_image_urls.sh
│   └── update-tests.sh
├── tests
│   └── interop
│       ├── README.md
│       ├── __init__.py
│       ├── conftest.py
│       ├── create_ci_badge.py
│       ├── requirements.txt
│       ├── run_tests.sh
│       ├── test_check_vm_status.py
│       ├── test_subscription_status_hub.py
│       └── test_validate_hub_site_components.py
├── values-global.yaml
├── values-hub.yaml
└── values-secret.yaml.template

/.ansible-lint:
--------------------------------------------------------------------------------
1 | # Vim filetype=yaml
2 | ---
3 | offline: false
4 | 
5 | exclude_paths:
6 |   - .cache/
7 |   - .github/
8 |   - charts/
9 |   - common/
10 |   - tests/
11 | 
12 | warn_list:
13 |   - yaml
14 |   - schema
15 |   - experimental
16 |   - risky-file-permissions
17 |   - var-spacing
18 |   - name[casing]
19 | 
20 | skip_list:
21 |   - var-naming[no-role-prefix]
22 | 
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: 2
3 | updates:
4 |   # Check for updates to GitHub Actions every week
5 |   - package-ecosystem: "github-actions"
6 |     directory: "/"
7 |     schedule:
8 |       interval: "weekly"
9 | 
10 | 
--------------------------------------------------------------------------------
/.github/linters/.gitleaks.toml:
--------------------------------------------------------------------------------
1 | [whitelist]
2 | # As of v4, gitleaks only matches against filename, not path in the
3 | # files directive. Leaving content for backwards compatibility.
4 | files = [ 5 | "tests/*.yaml", 6 | ] 7 | -------------------------------------------------------------------------------- /.github/linters/.markdown-lint.yml: -------------------------------------------------------------------------------- 1 | { 2 | "default": true, 3 | "MD003": false, 4 | "MD013": false, 5 | "MD033": false 6 | } -------------------------------------------------------------------------------- /.github/workflows/ansible-lint.yml: -------------------------------------------------------------------------------- 1 | name: Ansible Lint # feel free to pick your own name 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | # Important: This sets up your GITHUB_WORKSPACE environment variable 11 | - uses: actions/checkout@v4 12 | 13 | - name: Lint Ansible Playbook 14 | uses: ansible/ansible-lint-action@v6 15 | # Let's point it to the path 16 | with: 17 | path: "ansible/" 18 | -------------------------------------------------------------------------------- /.github/workflows/linter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Unit tests 3 | 4 | # 5 | # Documentation: 6 | # https://help.github.com/en/articles/workflow-syntax-for-github-actions 7 | # 8 | 9 | ############################# 10 | # Start the job on all push # 11 | ############################# 12 | on: [push, pull_request] 13 | 14 | ############### 15 | # Set the Job # 16 | ############### 17 | jobs: 18 | build: 19 | # Name the Job 20 | name: Unit Test Code Base 21 | # Set the agent to run on 22 | runs-on: ubuntu-latest 23 | 24 | ################## 25 | # Load all steps # 26 | ################## 27 | steps: 28 | ########################## 29 | # Checkout the code base # 30 | ########################## 31 | - name: Checkout Code 32 | uses: actions/checkout@v4 33 | with: 34 | # Full git history is needed to get a proper list of changed files within `super-linter` 35 | fetch-depth: 0 36 | - name: Setup helm 37 | uses: azure/setup-helm@v4 38 | with: 39 | version: 'v3.13.2' 40 | id: install 41 | 42 | ################################ 43 | # Run Linter against code base # 44 | ################################ 45 | # - name: Lint Code Base 46 | # uses: github/super-linter@v4 47 | # env: 48 | # VALIDATE_ALL_CODEBASE: false 49 | # DEFAULT_BRANCH: main 50 | # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 51 | - name: Run make test 52 | run: | 53 | make test 54 | -------------------------------------------------------------------------------- /.github/workflows/superlinter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Super linter 3 | 4 | on: [push, pull_request] 5 | 6 | jobs: 7 | build: 8 | # Name the Job 9 | name: Super linter 10 | # Set the agent to run on 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout Code 15 | uses: actions/checkout@v4 16 | with: 17 | # Full git history is needed to get a proper list of changed files within `super-linter` 18 | fetch-depth: 0 19 | 20 | ################################ 21 | # Run Linter against code base # 22 | ################################ 23 | - name: Lint Code Base 24 | uses: super-linter/super-linter/slim@v7 25 | env: 26 | VALIDATE_ALL_CODEBASE: true 27 | DEFAULT_BRANCH: main 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | # These are the validation we disable atm 30 | VALIDATE_ANSIBLE: false 31 | VALIDATE_BASH: false 32 | VALIDATE_CHECKOV: false 33 | VALIDATE_JSCPD: false 34 | VALIDATE_JSON_PRETTIER: 
false 35 | VALIDATE_MARKDOWN_PRETTIER: false 36 | VALIDATE_KUBERNETES_KUBECONFORM: false 37 | VALIDATE_PYTHON_PYLINT: false 38 | VALIDATE_SHELL_SHFMT: false 39 | VALIDATE_YAML: false 40 | VALIDATE_YAML_PRETTIER: false 41 | # VALIDATE_DOCKERFILE_HADOLINT: false 42 | # VALIDATE_MARKDOWN: false 43 | # VALIDATE_NATURAL_LANGUAGE: false 44 | # VALIDATE_TEKTON: false 45 | -------------------------------------------------------------------------------- /.github/workflows/update-metadata.yml: -------------------------------------------------------------------------------- 1 | # This job requires a secret called DOCS_TOKEN which should be a PAT token 2 | # that has the permissions described in: 3 | # validatedpatterns/docs/.github/workflows/metadata-docs.yml@main 4 | --- 5 | name: Update docs pattern metadata 6 | 7 | on: 8 | push: 9 | paths: 10 | - "pattern-metadata.yaml" 11 | - ".github/workflows/update-metadata.yml" 12 | 13 | jobs: 14 | update-metadata: 15 | uses: validatedpatterns/docs/.github/workflows/metadata-docs.yml@main 16 | permissions: # Workflow-level permissions 17 | contents: read # Required for "read-all" 18 | packages: write # Allows writing to packages 19 | id-token: write # Allows creating OpenID Connect (OIDC) tokens 20 | secrets: inherit 21 | # For testing you can point to a different branch in the docs repository 22 | # with: 23 | # DOCS_BRANCH: "main" 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.swp 3 | *.swo 4 | values-secret* 5 | .*.expected.yaml 6 | pattern-vault.init 7 | vault.init 8 | /ansible/execution_environment/context/_build/* 9 | pattern-vault.init.bak 10 | super-linter.log 11 | .vscode 12 | -------------------------------------------------------------------------------- /.gitleaks.toml: -------------------------------------------------------------------------------- 1 | title = "gitleaks config" 2 | 3 | # Gitleaks rules are defined by regular expressions and entropy ranges. 4 | # Some secrets have unique signatures which make detecting those secrets easy. 5 | # Examples of those secrets would be Gitlab Personal Access Tokens, AWS keys, and Github Access Tokens. 6 | # All these examples have defined prefixes like `glpat`, `AKIA`, `ghp_`, etc. 7 | # 8 | # Other secrets might just be a hash which means we need to write more complex rules to verify 9 | # that what we are matching is a secret. 10 | # 11 | # Here is an example of a semi-generic secret 12 | # 13 | # discord_client_secret = "8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ" 14 | # 15 | # We can write a regular expression to capture the variable name (identifier), 16 | # the assignment symbol (like '=' or ':='), and finally the actual secret. 
17 | # The structure of a rule to match this example secret is below: 18 | # 19 | # Beginning string 20 | # quotation 21 | # │ End string quotation 22 | # │ │ 23 | # ▼ ▼ 24 | # (?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"] 25 | # 26 | # ▲ ▲ ▲ 27 | # │ │ │ 28 | # │ │ │ 29 | # identifier assignment symbol 30 | # Secret 31 | # 32 | 33 | [[rules]] 34 | id = "gitlab_personal_access_token" 35 | description = "GitLab Personal Access Token" 36 | regex = '''glpat-[0-9a-zA-Z_\-]{20}''' 37 | tags = ["gitlab", "revocation_type"] 38 | 39 | [[rules]] 40 | id = "gitlab_runner_registration_token" 41 | description = "GitLab Runner Registration Token" 42 | regex = '''GR1348941[0-9a-zA-Z_\-]{20}''' 43 | tags = ["gitlab"] 44 | 45 | [[rules]] 46 | id = "AWS" 47 | description = "AWS Access Token" 48 | regex = '''AKIA[0-9A-Z]{16}''' 49 | tags = ["aws", "revocation_type"] 50 | 51 | # Cryptographic keys 52 | [[rules]] 53 | id = "PKCS8 private key" 54 | description = "PKCS8 private key" 55 | regex = '''-----BEGIN PRIVATE KEY-----''' 56 | 57 | [[rules]] 58 | id = "RSA private key" 59 | description = "RSA private key" 60 | regex = '''-----BEGIN RSA PRIVATE KEY-----''' 61 | 62 | [[rules]] 63 | id = "SSH private key" 64 | description = "SSH private key" 65 | regex = '''-----BEGIN OPENSSH PRIVATE KEY-----''' 66 | 67 | [[rules]] 68 | id = "PGP private key" 69 | description = "PGP private key" 70 | regex = '''-----BEGIN PGP PRIVATE KEY BLOCK-----''' 71 | 72 | [[rules]] 73 | id = "Github Personal Access Token" 74 | description = "Github Personal Access Token" 75 | regex = '''ghp_[0-9a-zA-Z]{36}''' 76 | 77 | [[rules]] 78 | id = "Github OAuth Access Token" 79 | description = "Github OAuth Access Token" 80 | regex = '''gho_[0-9a-zA-Z]{36}''' 81 | 82 | [[rules]] 83 | id = "SSH (DSA) private key" 84 | description = "SSH (DSA) private key" 85 | regex = '''-----BEGIN DSA PRIVATE KEY-----''' 86 | 87 | [[rules]] 88 | id = "SSH (EC) private key" 89 | description = "SSH (EC) private key" 90 | regex = '''-----BEGIN EC PRIVATE KEY-----''' 91 | 92 | 93 | [[rules]] 94 | id = "Github App Token" 95 | description = "Github App Token" 96 | regex = '''(ghu|ghs)_[0-9a-zA-Z]{36}''' 97 | 98 | [[rules]] 99 | id = "Github Refresh Token" 100 | description = "Github Refresh Token" 101 | regex = '''ghr_[0-9a-zA-Z]{76}''' 102 | 103 | [[rules]] 104 | id = "Shopify shared secret" 105 | description = "Shopify shared secret" 106 | regex = '''shpss_[a-fA-F0-9]{32}''' 107 | 108 | [[rules]] 109 | id = "Shopify access token" 110 | description = "Shopify access token" 111 | regex = '''shpat_[a-fA-F0-9]{32}''' 112 | 113 | [[rules]] 114 | id = "Shopify custom app access token" 115 | description = "Shopify custom app access token" 116 | regex = '''shpca_[a-fA-F0-9]{32}''' 117 | 118 | [[rules]] 119 | id = "Shopify private app access token" 120 | description = "Shopify private app access token" 121 | regex = '''shppa_[a-fA-F0-9]{32}''' 122 | 123 | [[rules]] 124 | id = "Slack token" 125 | description = "Slack token" 126 | regex = '''xox[baprs]-([0-9a-zA-Z]{10,48})?''' 127 | 128 | [[rules]] 129 | id = "Stripe" 130 | description = "Stripe" 131 | regex = '''(?i)(sk|pk)_(test|live)_[0-9a-z]{10,32}''' 132 | 133 | [[rules]] 134 | id = "PyPI upload token" 135 | description = "PyPI upload token" 136 | regex = '''pypi-AgEIcHlwaS5vcmc[A-Za-z0-9-_]{50,1000}''' 137 | tags = ["pypi", "revocation_type"] 138 | 139 | [[rules]] 140 | id = "Google (GCP) Service-account" 141 | description = "Google (GCP) Service-account" 142 | regex = 
'''\"type\": \"service_account\"''' 143 | 144 | [[rules]] 145 | # demo of this regex not matching passwords in urls that contain env vars: 146 | # https://regex101.com/r/rT9Lv9/6 147 | id = "Password in URL" 148 | description = "Password in URL" 149 | regex = '''[a-zA-Z]{3,10}:\/\/[^$][^:@\/\n]{3,20}:[^$][^:@\n\/]{3,40}@.{1,100}''' 150 | 151 | 152 | [[rules]] 153 | id = "Heroku API Key" 154 | description = "Heroku API Key" 155 | regex = ''' (?i)(heroku[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12})['\"]''' 156 | secretGroup = 3 157 | 158 | [[rules]] 159 | id = "Slack Webhook" 160 | description = "Slack Webhook" 161 | regex = '''https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}''' 162 | 163 | [[rules]] 164 | id = "Twilio API Key" 165 | description = "Twilio API Key" 166 | regex = '''SK[0-9a-fA-F]{32}''' 167 | 168 | [[rules]] 169 | id = "Age secret key" 170 | description = "Age secret key" 171 | regex = '''AGE-SECRET-KEY-1[QPZRY9X8GF2TVDW0S3JN54KHCE6MUA7L]{58}''' 172 | 173 | [[rules]] 174 | id = "Facebook token" 175 | description = "Facebook token" 176 | regex = '''(?i)(facebook[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' 177 | secretGroup = 3 178 | 179 | [[rules]] 180 | id = "Twitter token" 181 | description = "Twitter token" 182 | regex = '''(?i)(twitter[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{35,44})['\"]''' 183 | secretGroup = 3 184 | 185 | [[rules]] 186 | id = "Adobe Client ID (Oauth Web)" 187 | description = "Adobe Client ID (Oauth Web)" 188 | regex = '''(?i)(adobe[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' 189 | secretGroup = 3 190 | 191 | [[rules]] 192 | id = "Adobe Client Secret" 193 | description = "Adobe Client Secret" 194 | regex = '''(p8e-)(?i)[a-z0-9]{32}''' 195 | 196 | [[rules]] 197 | id = "Alibaba AccessKey ID" 198 | description = "Alibaba AccessKey ID" 199 | regex = '''(LTAI)(?i)[a-z0-9]{20}''' 200 | 201 | [[rules]] 202 | id = "Alibaba Secret Key" 203 | description = "Alibaba Secret Key" 204 | regex = '''(?i)(alibaba[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]''' 205 | secretGroup = 3 206 | 207 | [[rules]] 208 | id = "Asana Client ID" 209 | description = "Asana Client ID" 210 | regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{16})['\"]''' 211 | secretGroup = 3 212 | 213 | [[rules]] 214 | id = "Asana Client Secret" 215 | description = "Asana Client Secret" 216 | regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]''' 217 | secretGroup = 3 218 | 219 | [[rules]] 220 | id = "Atlassian API token" 221 | description = "Atlassian API token" 222 | regex = '''(?i)(atlassian[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{24})['\"]''' 223 | secretGroup = 3 224 | 225 | [[rules]] 226 | id = "Bitbucket client ID" 227 | description = "Bitbucket client ID" 228 | regex = '''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]''' 229 | secretGroup = 3 230 | 231 | [[rules]] 232 | id = "Bitbucket client secret" 233 | description = "Bitbucket client secret" 234 | regex = '''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9_\-]{64})['\"]''' 235 | secretGroup = 3 236 | 237 | [[rules]] 238 | id = "Beamer API token" 239 | description = "Beamer API token" 240 | regex = '''(?i)(beamer[a-z0-9_ 
.\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](b_[a-z0-9=_\-]{44})['\"]''' 241 | secretGroup = 3 242 | 243 | [[rules]] 244 | id = "Clojars API token" 245 | description = "Clojars API token" 246 | regex = '''(CLOJARS_)(?i)[a-z0-9]{60}''' 247 | 248 | [[rules]] 249 | id = "Contentful delivery API token" 250 | description = "Contentful delivery API token" 251 | regex = '''(?i)(contentful[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{43})['\"]''' 252 | secretGroup = 3 253 | 254 | [[rules]] 255 | id = "Contentful preview API token" 256 | description = "Contentful preview API token" 257 | regex = '''(?i)(contentful[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{43})['\"]''' 258 | secretGroup = 3 259 | 260 | [[rules]] 261 | id = "Databricks API token" 262 | description = "Databricks API token" 263 | regex = '''dapi[a-h0-9]{32}''' 264 | 265 | [[rules]] 266 | id = "Discord API key" 267 | description = "Discord API key" 268 | regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{64})['\"]''' 269 | secretGroup = 3 270 | 271 | [[rules]] 272 | id = "Discord client ID" 273 | description = "Discord client ID" 274 | regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{18})['\"]''' 275 | secretGroup = 3 276 | 277 | [[rules]] 278 | id = "Discord client secret" 279 | description = "Discord client secret" 280 | regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"]''' 281 | secretGroup = 3 282 | 283 | [[rules]] 284 | id = "Doppler API token" 285 | description = "Doppler API token" 286 | regex = '''['\"](dp\.pt\.)(?i)[a-z0-9]{43}['\"]''' 287 | 288 | [[rules]] 289 | id = "Dropbox API secret/key" 290 | description = "Dropbox API secret/key" 291 | regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{15})['\"]''' 292 | 293 | [[rules]] 294 | id = "Dropbox short lived API token" 295 | description = "Dropbox short lived API token" 296 | regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](sl\.[a-z0-9\-=_]{135})['\"]''' 297 | 298 | [[rules]] 299 | id = "Dropbox long lived API token" 300 | description = "Dropbox long lived API token" 301 | regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"][a-z0-9]{11}(AAAAAAAAAA)[a-z0-9\-_=]{43}['\"]''' 302 | 303 | [[rules]] 304 | id = "Duffel API token" 305 | description = "Duffel API token" 306 | regex = '''['\"]duffel_(test|live)_(?i)[a-z0-9_-]{43}['\"]''' 307 | 308 | [[rules]] 309 | id = "Dynatrace API token" 310 | description = "Dynatrace API token" 311 | regex = '''['\"]dt0c01\.(?i)[a-z0-9]{24}\.[a-z0-9]{64}['\"]''' 312 | 313 | [[rules]] 314 | id = "EasyPost API token" 315 | description = "EasyPost API token" 316 | regex = '''['\"]EZAK(?i)[a-z0-9]{54}['\"]''' 317 | 318 | [[rules]] 319 | id = "EasyPost test API token" 320 | description = "EasyPost test API token" 321 | regex = '''['\"]EZTK(?i)[a-z0-9]{54}['\"]''' 322 | 323 | [[rules]] 324 | id = "Fastly API token" 325 | description = "Fastly API token" 326 | regex = '''(?i)(fastly[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{32})['\"]''' 327 | secretGroup = 3 328 | 329 | [[rules]] 330 | id = "Finicity client secret" 331 | description = "Finicity client secret" 332 | regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{20})['\"]''' 333 | secretGroup = 3 334 | 335 | [[rules]] 336 | id = "Finicity API token" 337 | description = "Finicity API 
token" 338 | regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' 339 | secretGroup = 3 340 | 341 | [[rules]] 342 | id = "Flutterweave public key" 343 | description = "Flutterweave public key" 344 | regex = '''FLWPUBK_TEST-(?i)[a-h0-9]{32}-X''' 345 | 346 | [[rules]] 347 | id = "Flutterweave secret key" 348 | description = "Flutterweave secret key" 349 | regex = '''FLWSECK_TEST-(?i)[a-h0-9]{32}-X''' 350 | 351 | [[rules]] 352 | id = "Flutterweave encrypted key" 353 | description = "Flutterweave encrypted key" 354 | regex = '''FLWSECK_TEST[a-h0-9]{12}''' 355 | 356 | [[rules]] 357 | id = "Frame.io API token" 358 | description = "Frame.io API token" 359 | regex = '''fio-u-(?i)[a-z0-9-_=]{64}''' 360 | 361 | [[rules]] 362 | id = "GoCardless API token" 363 | description = "GoCardless API token" 364 | regex = '''['\"]live_(?i)[a-z0-9-_=]{40}['\"]''' 365 | 366 | [[rules]] 367 | id = "Grafana API token" 368 | description = "Grafana API token" 369 | regex = '''['\"]eyJrIjoi(?i)[a-z0-9-_=]{72,92}['\"]''' 370 | 371 | [[rules]] 372 | id = "Hashicorp Terraform user/org API token" 373 | description = "Hashicorp Terraform user/org API token" 374 | regex = '''['\"](?i)[a-z0-9]{14}\.atlasv1\.[a-z0-9-_=]{60,70}['\"]''' 375 | 376 | [[rules]] 377 | id = "Hashicorp Vault batch token" 378 | description = "Hashicorp Vault batch token" 379 | regex = '''b\.AAAAAQ[0-9a-zA-Z_-]{156}''' 380 | 381 | [[rules]] 382 | id = "Hubspot API token" 383 | description = "Hubspot API token" 384 | regex = '''(?i)(hubspot[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]''' 385 | secretGroup = 3 386 | 387 | [[rules]] 388 | id = "Intercom API token" 389 | description = "Intercom API token" 390 | regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_]{60})['\"]''' 391 | secretGroup = 3 392 | 393 | [[rules]] 394 | id = "Intercom client secret/ID" 395 | description = "Intercom client secret/ID" 396 | regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]''' 397 | secretGroup = 3 398 | 399 | [[rules]] 400 | id = "Ionic API token" 401 | description = "Ionic API token" 402 | regex = '''ion_(?i)[a-z0-9]{42}''' 403 | 404 | [[rules]] 405 | id = "Linear API token" 406 | description = "Linear API token" 407 | regex = '''lin_api_(?i)[a-z0-9]{40}''' 408 | 409 | [[rules]] 410 | id = "Linear client secret/ID" 411 | description = "Linear client secret/ID" 412 | regex = '''(?i)(linear[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' 413 | secretGroup = 3 414 | 415 | [[rules]] 416 | id = "Lob API Key" 417 | description = "Lob API Key" 418 | regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((live|test)_[a-f0-9]{35})['\"]''' 419 | secretGroup = 3 420 | 421 | [[rules]] 422 | id = "Lob Publishable API Key" 423 | description = "Lob Publishable API Key" 424 | regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((test|live)_pub_[a-f0-9]{31})['\"]''' 425 | secretGroup = 3 426 | 427 | [[rules]] 428 | id = "Mailchimp API key" 429 | description = "Mailchimp API key" 430 | regex = '''(?i)(mailchimp[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32}-us20)['\"]''' 431 | secretGroup = 3 432 | 433 | [[rules]] 434 | id = "Mailgun private API token" 435 | description = "Mailgun private API token" 436 | regex = '''(?i)(mailgun[a-z0-9_ 
.\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](key-[a-f0-9]{32})['\"]''' 437 | secretGroup = 3 438 | 439 | [[rules]] 440 | id = "Mailgun public validation key" 441 | description = "Mailgun public validation key" 442 | regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](pubkey-[a-f0-9]{32})['\"]''' 443 | secretGroup = 3 444 | 445 | [[rules]] 446 | id = "Mailgun webhook signing key" 447 | description = "Mailgun webhook signing key" 448 | regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{32}-[a-h0-9]{8}-[a-h0-9]{8})['\"]''' 449 | secretGroup = 3 450 | 451 | [[rules]] 452 | id = "Mapbox API token" 453 | description = "Mapbox API token" 454 | regex = '''(?i)(pk\.[a-z0-9]{60}\.[a-z0-9]{22})''' 455 | 456 | [[rules]] 457 | id = "messagebird-api-token" 458 | description = "MessageBird API token" 459 | regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{25})['\"]''' 460 | secretGroup = 3 461 | 462 | [[rules]] 463 | id = "MessageBird API client ID" 464 | description = "MessageBird API client ID" 465 | regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]''' 466 | secretGroup = 3 467 | 468 | [[rules]] 469 | id = "New Relic user API Key" 470 | description = "New Relic user API Key" 471 | regex = '''['\"](NRAK-[A-Z0-9]{27})['\"]''' 472 | 473 | [[rules]] 474 | id = "New Relic user API ID" 475 | description = "New Relic user API ID" 476 | regex = '''(?i)(newrelic[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([A-Z0-9]{64})['\"]''' 477 | secretGroup = 3 478 | 479 | [[rules]] 480 | id = "New Relic ingest browser API token" 481 | description = "New Relic ingest browser API token" 482 | regex = '''['\"](NRJS-[a-f0-9]{19})['\"]''' 483 | 484 | [[rules]] 485 | id = "npm access token" 486 | description = "npm access token" 487 | regex = '''['\"](npm_(?i)[a-z0-9]{36})['\"]''' 488 | 489 | [[rules]] 490 | id = "Planetscale password" 491 | description = "Planetscale password" 492 | regex = '''pscale_pw_(?i)[a-z0-9\-_\.]{43}''' 493 | 494 | [[rules]] 495 | id = "Planetscale API token" 496 | description = "Planetscale API token" 497 | regex = '''pscale_tkn_(?i)[a-z0-9\-_\.]{43}''' 498 | 499 | [[rules]] 500 | id = "Postman API token" 501 | description = "Postman API token" 502 | regex = '''PMAK-(?i)[a-f0-9]{24}\-[a-f0-9]{34}''' 503 | 504 | [[rules]] 505 | id = "Pulumi API token" 506 | description = "Pulumi API token" 507 | regex = '''pul-[a-f0-9]{40}''' 508 | 509 | [[rules]] 510 | id = "Rubygem API token" 511 | description = "Rubygem API token" 512 | regex = '''rubygems_[a-f0-9]{48}''' 513 | 514 | [[rules]] 515 | id = "Sendgrid API token" 516 | description = "Sendgrid API token" 517 | regex = '''SG\.(?i)[a-z0-9_\-\.]{66}''' 518 | 519 | [[rules]] 520 | id = "Sendinblue API token" 521 | description = "Sendinblue API token" 522 | regex = '''xkeysib-[a-f0-9]{64}\-(?i)[a-z0-9]{16}''' 523 | 524 | [[rules]] 525 | id = "Shippo API token" 526 | description = "Shippo API token" 527 | regex = '''shippo_(live|test)_[a-f0-9]{40}''' 528 | 529 | [[rules]] 530 | id = "Linkedin Client secret" 531 | description = "Linkedin Client secret" 532 | regex = '''(?i)(linkedin[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z]{16})['\"]''' 533 | secretGroup = 3 534 | 535 | [[rules]] 536 | id = "Linkedin Client ID" 537 | description = "Linkedin Client ID" 538 | regex = '''(?i)(linkedin[a-z0-9_ 
.\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{14})['\"]'''
539 | secretGroup = 3
540 | 
541 | [[rules]]
542 | id = "Twitch API token"
543 | description = "Twitch API token"
544 | regex = '''(?i)(twitch[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]'''
545 | secretGroup = 3
546 | 
547 | [[rules]]
548 | id = "Typeform API token"
549 | description = "Typeform API token"
550 | regex = '''(?i)(typeform[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}(tfp_[a-z0-9\-_\.=]{59})'''
551 | secretGroup = 3
552 | 
553 | [[rules]]
554 | id = "Social Security Number"
555 | description = "Social Security Number"
556 | regex = '''\d{3}-\d{2}-\d{4}'''
557 | 
558 | [[rules]]
559 | description = "Yandex.Cloud IAM Cookie v1"
560 | regex = '''\bc1\.[A-Z0-9a-z_-]+[=]{0,2}\.[A-Z0-9a-z_-]{86}[=]{0,2}['|\"|\n|\r|\s|\x60]'''
561 | 
562 | [[rules]]
563 | description = "Yandex.Cloud IAM Token v1"
564 | regex = '''\bt1\.[A-Z0-9a-z_-]+[=]{0,2}\.[A-Z0-9a-z_-]{86}[=]{0,2}['|\"|\n|\r|\s|\x60]'''
565 | 
566 | [[rules]]
567 | description = "Yandex.Cloud IAM API key v1"
568 | regex = '''\bAQVN[A-Za-z0-9_\-]{35,38}['|\"|\n|\r|\s|\x60]'''
569 | 
570 | [[rules]]
571 | description = "Yandex.Cloud AWS API compatible Access Secret"
572 | regex = '''\bYC[a-zA-Z0-9_\-]{38}['|\"|\n|\r|\s|\x60]'''
573 | 
574 | [allowlist]
575 | description = "global allow lists"
576 | regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''6toh-n1d5-9xpq''' ]
577 | paths = [
578 |   '''gitleaks.toml''',
579 |   '''.gitleaks.toml''',
580 |   '''test_vault_load_secrets.py''',
581 |   '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket|md|adoc)$''',
582 | ]
583 | 
--------------------------------------------------------------------------------
/Changes.md:
--------------------------------------------------------------------------------
1 | # Change history for significant pattern releases
2 | 
3 | ## Changes for v1.1 (October 28, 2022)
4 | 
5 | * ODF improvements: Kiosk VMs now explicitly request Ceph storage from ODF in order to be live-migratable in situations with multiple virt-capable workers. Storage types can be customized per-VM using the `storageClassName`, `volumeMode`, and `accessMode` attributes of a specific VM type, or by setting the `defaultStorageClass`, `defaultVolumeMode`, and `defaultAccessMode` values in the chart. The settings are `coalesce`'d in the chart template so you can mix and match as desired (a short sketch appears after the v1.4 notes below).
6 | 
7 | * edge-gitops-vms wait: edge-gitops-vms now waits until there is at least one metal node "ready" before creating the VM-related resources. This prevents the application from being marked as in "error" state due to repeated failures caused by the kubevirt.io API not being available. This can be turned off by using the `.Values.waitForMetalNode` toggle in the `edge-gitops-vms` chart.
8 | 
9 | * More comprehensive use of (Hashicorp) Vault secrets: All secrets are now stored in Vault and published in the `aap-config` application so that playbooks can retrieve them and provide them to AAP.
10 | 
11 | * More declarative configuration: AAP configuration now runs as part of the imperative framework, so that changes to the `ansible_configure_controller.yml` playbook will be applied on future runs of the imperative job.
12 | 
13 | * Fix Out-of-Sync conditions: Fixed cosmetic issues in both the ODF (OpenShift Data Foundations) and CNV (OpenShift Virtualization/Container Native Virtualization) spaces that would make those applications show as out-of-sync. These issues caused our internal CI to fail, and we judged it better to fix those issues than to "live" with the out-of-syncs.
14 | 
15 | ## Changes for v1.2 (February 9, 2023)
16 | 
17 | * Kiosk_mode improvements: the kiosk_mode role now has a variable `kiosk_port`, which influences the kiosk-mode script and controls which port firefox connects to. (Previously this was hardcoded to port 8088; the var defaults to 8088 so existing setups will continue to work.) This will make it easier to tailor or customize the pattern to work with containers other than Ignition.
18 | 
19 | * cloud-init changes: move the cloud-init configuration file, user, and password to secrets from edge-gitops-vms values. This was a regrettable oversight in v1.0 and v1.1.
20 | 
21 | * Common updates: Update common to the upstream hybrid-cloud-patterns/common main branch.
22 | 
23 | * Secrets update: The documented secrets-template is now compliant with the version 2.0 secrets mechanism from hybrid-cloud-patterns/common. Secrets following the older unversioned format will still work.
24 | 
25 | ## Changes for v1.2 (April 27, 2023)
26 | 
27 | * No "visible" changes, so the branch pointer was not updated
28 | 
29 | * Updated ansible code to follow best practices and silence many linting warnings
30 | 
31 | * Updated edge-gitops-vms chart to add SkipDryRunOnMissingResource annotations to prevent errors occurring due to race conditions with OpenShift Virtualization
32 | 
33 | * Updated wait-for-metal-nodes machinery to also skip RBAC creation, since the only reason for it in e-g-v is the job, which should only be needed when provisioning a separate metal node, as is needed on AWS
34 | 
35 | * Updated common to refresh vault and external-secrets and pick up default features for gitops-1.8
36 | 
37 | ## Changes for v1.3 (October 27, 2023)
38 | 
39 | * Introduce Portworx Enterprise as an alternative resilient storage solution for the VMs
40 | * Update common for feature/functionality upgrades
41 | * Update the default metal node type from c5n.metal to m5.metal to better accommodate different AWS Zones
42 | * Remove support for 4.10 (since it is out of support)
43 | * Update the platform-level override using the new templated valuefile name feature in common
44 | * Skip multicloud gateway (noobaa) installation in ODF by default
45 | 
46 | ## Changes for v1.4 (July 29, 2024)
47 | 
48 | * Introduce the clean-golden-images job to imperative. This is a workaround for a bug in CNV 4.15/ODF 4.15 where, if the default StorageClass is not the same as the default virtualization storage class, CNV cannot properly provision datavolumes.
49 | * Default the storageclass for edge-gitops-vms to "ocs-storagecluster-ceph-rbd-virtualization", available since ODF 4.14.
50 | * Use api_version for Route queries when discovering credentials for the AAP instance.
51 | * Update common.
52 | * Update the deploy_kubevirt_worker.yml Ansible playbook to copy the securityGroups and blockDevices config from the first machineSet. Tag naming schemes changed from OCP 4.15 to 4.16; this method ensures forward and backward compatibility.
53 | * Remove ODF overrides from OCP 4.12/4.13 that forced storageClass to gp2; all released versions should use gp3-csi now.
54 | * Include overrides for OCP 4.12 and OCP 4.13 to use the older `ocs-storagecluster-ceph-rbd` storageClass.
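For illustration, here is a minimal sketch of the storage settings described in the v1.1 notes above, assuming an edge-gitops-vms values layout in which chart-wide defaults are `coalesce`'d with per-VM attributes (the `vms.kiosk` nesting shown here is illustrative, not necessarily the chart's exact schema):

```yaml
# Chart-wide defaults, used when a VM type does not set its own values.
defaultStorageClass: ocs-storagecluster-ceph-rbd-virtualization
defaultVolumeMode: Block
defaultAccessMode: ReadWriteMany   # RWX is what makes live migration possible

vms:
  kiosk:
    # Per-VM attributes take precedence over the defaults above.
    storageClassName: ocs-storagecluster-ceph-rbd-virtualization
    volumeMode: Block
    accessMode: ReadWriteMany
```

In the template, a Helm expression such as `{{ coalesce .storageClassName $.Values.defaultStorageClass }}` picks the first non-empty value, which is what allows mixing and matching per-VM settings with chart defaults.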
55 | 
56 | ## Changes for v2.0 (March 5, 2025)
57 | 
58 | * Split the HMI Demo Project out to a separate [repository](https://github.com/validatedpatterns-demos/rhvp.ansible_edge_hmi)
59 | * Split the HMI Config out to a separate [repository](https://github.com/validatedpatterns-demos/ansible-edge-gitops-hmi-config-as-code.git)
60 | * Drop the custom execution environment, because AAP can resolve these dependencies itself
61 | * Switch to modular common
62 | * Use the Validated Patterns ODF chart (dropping our custom version)
63 | * Drop the portworx chart and Makefile targets, as the only OCP version that supports it is 4.12, which is now past
64 |   the end of its maintenance support lifecycle.
65 | * Refactor the installation mechanism to use a standard configuration-as-code approach, which will make it easier to drop
66 |   in a new config-as-code repository.
67 | * Move VM definitions outside of the edge-gitops-vms chart so that derived patterns do not inherit the HMI kiosks. Kiosk
68 |   VMs are now defined by default in overrides.
69 | * Use Validated Patterns charts for installing Ansible Automation Platform, OpenShift Virtualization, and
70 |   edge-gitops-vms.
71 | * Switch to AAP-2.5 support as provided by the Validated Patterns aap-config chart.
72 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | 
2 |                                  Apache License
3 |                            Version 2.0, January 2004
4 |                         http://www.apache.org/licenses/
5 | 
6 |    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 | 
8 |    1. Definitions.
9 | 
10 |       "License" shall mean the terms and conditions for use, reproduction,
11 |       and distribution as defined by Sections 1 through 9 of this document.
12 | 
13 |       "Licensor" shall mean the copyright owner or entity authorized by
14 |       the copyright owner that is granting the License.
15 | 
16 |       "Legal Entity" shall mean the union of the acting entity and all
17 |       other entities that control, are controlled by, or are under common
18 |       control with that entity. For the purposes of this definition,
19 |       "control" means (i) the power, direct or indirect, to cause the
20 |       direction or management of such entity, whether by contract or
21 |       otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 |       outstanding shares, or (iii) beneficial ownership of such entity.
23 | 
24 |       "You" (or "Your") shall mean an individual or Legal Entity
25 |       exercising permissions granted by this License.
26 | 
27 |       "Source" form shall mean the preferred form for making modifications,
28 |       including but not limited to software source code, documentation
29 |       source, and configuration files.
30 | 
31 |       "Object" form shall mean any form resulting from mechanical
32 |       transformation or translation of a Source form, including but
33 |       not limited to compiled object code, generated documentation,
34 |       and conversions to other media types.
35 | 
36 |       "Work" shall mean the work of authorship, whether in Source or
37 |       Object form, made available under the License, as indicated by a
38 |       copyright notice that is included in or attached to the work
39 |       (an example is provided in the Appendix below).
40 | 
41 |       "Derivative Works" shall mean any work, whether in Source or Object
42 |       form, that is based on (or derived from) the Work and for which the
43 |       editorial revisions, annotations, elaborations, or other modifications
44 |       represent, as a whole, an original work of authorship.
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | 
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | CHART_OPTS=-f values-secret.yaml.template -f values-global.yaml -f values-hub.yaml --set global.targetRevision=main --set global.valuesDirectoryURL="https://github.com/hybrid-cloud-patterns/ansible-edge-gitops/raw/main/" --set global.pattern="$(NAME)" --set global.namespace="$(NAME)" --set global.hubClusterDomain=example.com --set global.localClusterDomain=local.example.com
2 | 
3 | CHARTS=$(shell find . -type f -iname 'Chart.yaml' -exec dirname "{}" \; | grep -v examples | sed -e 's/.\///')
4 | 
5 | .PHONY: default
6 | default: help
7 | 
8 | help:
9 | 	@make -f common/Makefile MAKEFILE_LIST="Makefile common/Makefile" help
10 | 
11 | %:
12 | 	make -f common/Makefile $*
13 | 
14 | install upgrade deploy: operator-deploy post-install ## Install or upgrade the pattern via the operator
15 | 	echo "Installed/Upgraded"
16 | 
17 | post-install: ## Post-install tasks - load-secrets
18 | 	make load-secrets
19 | 	echo "Post-deploy complete"
20 | 
21 | deploy-kubevirt-worker: ## Deploy the metal node worker (from workstation). This is normally done in-cluster
22 | 	./scripts/deploy_kubevirt_worker.sh
23 | 
24 | configure-controller: ## Configure AAP operator (from workstation). This is normally done in-cluster
25 | 	ansible-playbook ./scripts/ansible_load_controller.sh -e "aeg_project_repo=$(TARGET_REPO) aeg_project_branch=$(TARGET_BRANCH)"
26 | 
27 | test: ## Run tests
28 | 	@set -e; for i in $(CHARTS); do echo "$${i}"; helm template "$${i}"; done
29 | 	echo Tests SUCCESSFUL
30 | 
31 | update-tests: ## Update test results
32 | 	./scripts/update-tests.sh $(CHART_OPTS)
33 | 
34 | .PHONY: install test
35 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ansible Edge Gitops
2 | 
3 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0)
4 | [![AWS](https://img.shields.io/endpoint?url=https%3A%2F%2Fstorage.googleapis.com%2Fhcp-results%2Faegitops-aws-ci.json)](https://storage.googleapis.com/hcp-results/aegitops-aws-ci.json)
5 | 
6 | ## Start Here
7 | 
8 | If you've followed a link to this repository, but are not really sure what it contains
9 | or how to use it, head over to [Ansible Edge GitOps](https://validatedpatterns.io/patterns/ansible-edge-gitops/)
10 | for additional context and installation instructions
11 | 
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | display_skipped_hosts=False
3 | localhost_warning=False
4 | retry_files_enabled=False
5 | library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules
6 | roles_path=~/.ansible/roles:./ansible/roles:./common/ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
7 | 
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | display_skipped_hosts=False
3 | localhost_warning=False
4 | roles_path=./roles:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
5 | 
--------------------------------------------------------------------------------
/ansible/ansible_get_credentials.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Retrieve Credentials for AAP on OpenShift 4 | become: false 5 | connection: local 6 | hosts: localhost 7 | gather_facts: false 8 | vars: 9 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 10 | tasks: 11 | - name: Retrieve API hostname for AAP 12 | kubernetes.core.k8s_info: 13 | api_version: route.openshift.io/v1 14 | kind: Route 15 | namespace: ansible-automation-platform 16 | name: aap 17 | register: aap_host 18 | until: aap_host.resources | length == 1 19 | retries: 20 20 | delay: 5 21 | 22 | - name: Set ansible_host 23 | ansible.builtin.set_fact: 24 | ansible_host: "{{ aap_host.resources[0].spec.host }}" 25 | 26 | - name: Retrieve admin password for AAP 27 | kubernetes.core.k8s_info: 28 | kind: Secret 29 | namespace: ansible-automation-platform 30 | name: aap-admin-password 31 | register: admin_pw 32 | until: admin_pw.resources | length == 1 33 | retries: 20 34 | delay: 5 35 | 36 | - name: Set admin_password fact 37 | ansible.builtin.set_fact: 38 | admin_password: "{{ admin_pw.resources[0].data.password | b64decode }}" 39 | 40 | - name: Report AAP Endpoint 41 | ansible.builtin.debug: 42 | msg: "AAP Endpoint: https://{{ ansible_host }}" 43 | 44 | - name: Report AAP User 45 | ansible.builtin.debug: 46 | msg: "AAP Admin User: admin" 47 | 48 | - name: Report AAP Admin Password 49 | ansible.builtin.debug: 50 | msg: "AAP Admin Password: {{ admin_password }}" 51 | -------------------------------------------------------------------------------- /ansible/check_kubevirt_worker.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Install manifest on AAP controller 4 | become: false 5 | connection: local 6 | hosts: localhost 7 | gather_facts: false 8 | vars: 9 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 10 | tasks: 11 | - name: Fetch infrastructure values 12 | kubernetes.core.k8s_info: 13 | kind: Infrastructure 14 | namespace: "" 15 | name: cluster 16 | register: infra_values 17 | 18 | # Do platform specific set facts here 19 | 20 | - name: Check for metal machinesets 21 | kubernetes.core.k8s_info: 22 | api: machine.openshift.io/v1beta1 23 | kind: MachineSet 24 | namespace: openshift-machine-api 25 | register: metal_machinesets 26 | 27 | - name: Display metal machinesets 28 | ansible.builtin.debug: 29 | msg: "{{ metal_machinesets }}" 30 | 31 | - name: End play early if found 32 | ansible.builtin.meta: end_play 33 | when: metal_machinesets.resources | length > 0 34 | 35 | - name: End play 36 | ansible.builtin.meta: end_play 37 | 38 | - name: Display infrastructure values 39 | ansible.builtin.debug: 40 | msg: "{{ infra_values }}" 41 | 42 | - name: Fetch machinesets 43 | kubernetes.core.k8s_info: 44 | api: machine.openshift.io/v1beta1 45 | kind: MachineSet 46 | namespace: openshift-machine-api 47 | register: machine_sets 48 | 49 | - name: Display machineset values 50 | ansible.builtin.debug: 51 | msg: "{{ machine_sets.resources[0] }}" 52 | -------------------------------------------------------------------------------- /ansible/deploy_kubevirt_worker.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Install a metal worker 4 | become: false 5 | connection: local 6 | hosts: localhost 7 | gather_facts: false 8 | vars: 9 | kubeconfig: "{{ 
lookup('env', 'KUBECONFIG') }}" 10 | machineset_instance_type: m5.metal 11 | machineset_machine_role: worker 12 | machineset_machine_type: worker 13 | machineset_name: metal-worker 14 | machineset_node_labels: 15 | node-role.kubernetes.io/worker: "" 16 | machineset_replicas: 1 17 | machineset_user_data_secret: worker-user-data 18 | machineset_user_data_namespace: openshift-machine-api 19 | tasks: 20 | - name: Query Cluster Infrastructure Name 21 | kubernetes.core.k8s_info: 22 | api_version: config.openshift.io/v1 23 | kind: Infrastructure 24 | name: cluster 25 | register: cluster_info 26 | 27 | - name: Assert Platform is AWS 28 | ansible.builtin.assert: 29 | fail_msg: Platform for OpenShift cluster must be AWS! 30 | that: 31 | - cluster_info.resources[0].status.platform == "AWS" 32 | 33 | - name: Query MachineSets 34 | kubernetes.core.k8s_info: 35 | api_version: machine.openshift.io/v1beta1 36 | kind: MachineSet 37 | namespace: openshift-machine-api 38 | register: cluster_machinesets 39 | 40 | - name: Set Dynamic MachineSet Facts 41 | ansible.builtin.set_fact: 42 | machineset_ami_id: "{{ cluster_machinesets.resources[0].spec.template.spec.providerSpec.value.ami.id }}" 43 | machineset_subnet: "{{ cluster_machinesets.resources[0].spec.template.spec.providerSpec.value.subnet.filters[0]['values'][0] }}" 44 | machineset_tags: "{{ cluster_machinesets.resources[0].spec.template.spec.providerSpec.value.tags }}" 45 | machineset_blockdevices: "{{ cluster_machinesets.resources[0].spec.template.spec.providerSpec.value.blockDevices }}" 46 | machineset_securitygroups: "{{ cluster_machinesets.resources[0].spec.template.spec.providerSpec.value.securityGroups }}" 47 | machineset_zone: "{{ cluster_machinesets.resources[0].spec.template.spec.providerSpec.value.placement.availabilityZone }}" 48 | infrastructure_name: "{{ cluster_info.resources[0].status.infrastructureName }}" 49 | infrastructure_region: "{{ cluster_info.resources[0].status.platformStatus.aws.region }}" 50 | 51 | - name: Define template for creating machineset 52 | ansible.builtin.set_fact: 53 | machineset_yaml: | 54 | apiVersion: machine.openshift.io/v1beta1 55 | kind: MachineSet 56 | metadata: 57 | labels: 58 | machine.openshift.io/cluster-api-cluster: "{{ infrastructure_name }}" 59 | edge-gitops-role: kubevirt-worker 60 | name: "{{ infrastructure_name }}-{{ machineset_name }}-{{ machineset_zone }}" 61 | namespace: openshift-machine-api 62 | spec: 63 | replicas: {{ machineset_replicas | int }} 64 | selector: 65 | matchLabels: 66 | machine.openshift.io/cluster-api-cluster: "{{ infrastructure_name }}" 67 | machine.openshift.io/cluster-api-machineset: "{{ infrastructure_name }}-{{ machineset_name }}-{{ machineset_zone }}" 68 | template: 69 | metadata: 70 | labels: 71 | machine.openshift.io/cluster-api-cluster: "{{ infrastructure_name }}" 72 | machine.openshift.io/cluster-api-machine-role: "{{ machineset_machine_role }}" 73 | machine.openshift.io/cluster-api-machine-type: "{{ machineset_machine_type }}" 74 | machine.openshift.io/cluster-api-machineset: "{{ infrastructure_name }}-{{ machineset_name }}-{{ machineset_zone }}" 75 | {% if machineset_os is defined %} 76 | machine.openshift.io/os-id: {{ machineset_os }} 77 | {% endif %} 78 | spec: 79 | metadata: 80 | labels: {{ machineset_node_labels }} 81 | providerSpec: 82 | value: 83 | ami: 84 | id: "{{ machineset_ami_id }}" 85 | apiVersion: awsproviderconfig.openshift.io/v1beta1 86 | blockDevices: {{ machineset_blockdevices }} 87 | credentialsSecret: 88 | name: aws-cloud-credentials 89 | 
deviceIndex: 0 90 | iamInstanceProfile: 91 | id: "{{ infrastructure_name }}-worker-profile" 92 | instanceType: "{{ machineset_instance_type }}" 93 | kind: AWSMachineProviderConfig 94 | placement: 95 | availabilityZone: "{{ machineset_zone }}" 96 | region: "{{ infrastructure_region }}" 97 | securityGroups: {{ machineset_securitygroups }} 98 | subnet: 99 | filters: 100 | - name: tag:Name 101 | values: 102 | - "{{ machineset_subnet }}" 103 | tags: {{ machineset_tags }} 104 | userDataSecret: 105 | name: "{{ machineset_user_data_secret }}" 106 | namespace: "{{ machineset_user_data_namespace }}" 107 | 108 | - name: Create MachineSet 109 | kubernetes.core.k8s: 110 | definition: "{{ machineset_yaml | from_yaml }}" 111 | state: present 112 | -------------------------------------------------------------------------------- /ansible/odf_clean_pvcs.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Determine if we have PVC clean-up to do 4 | become: false 5 | connection: local 6 | hosts: localhost 7 | gather_facts: false 8 | vars: 9 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 10 | pvc_cleanup: false 11 | image_cleanup_namespace: "openshift-virtualization-os-images" 12 | dv_namespace: edge-gitops-vms 13 | dv_remove_timeout: 1800 14 | dv_remove_status: ["Pending"] 15 | ts_fmt: '%Y-%m-%dT%H:%M:%SZ' 16 | tasks: 17 | - name: Find default storageclass 18 | ansible.builtin.shell: | 19 | set -o pipefail 20 | oc get storageclass -o json | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class")' 21 | register: default_sc_output 22 | changed_when: false 23 | 24 | - name: Find virtualization default storageclass 25 | ansible.builtin.shell: | 26 | set -o pipefail 27 | oc get storageclass -o json | jq -r '.items[] | select(.metadata.annotations."storageclass.kubevirt.io/is-default-virt-class")' 28 | register: default_virt_sc_output 29 | changed_when: false 30 | 31 | - name: Compare default virtualization storageclass and default storageclass to determine whether to clean PVCs 32 | block: 33 | - name: Parse results 34 | ansible.builtin.set_fact: 35 | default_sc: '{{ default_sc_output.stdout | from_json }}' 36 | default_virt_sc: '{{ default_virt_sc_output.stdout | from_json }}' 37 | 38 | - name: Commit to PVC cleanup 39 | ansible.builtin.set_fact: 40 | pvc_cleanup: true 41 | when: 42 | - default_virt_sc.metadata.name == "ocs-storagecluster-ceph-rbd-virtualization" 43 | - default_sc.metadata.name != default_virt_sc.metadata.name 44 | rescue: 45 | - name: Note that we exited 46 | ansible.builtin.debug: 47 | msg: "Caught an error before we could determine to clean up PVCs, exiting" 48 | 49 | - name: Cleanup incorrect datasourceimport images (PVCs) 50 | when: 51 | - pvc_cleanup 52 | block: 53 | - name: Find PVCs 54 | kubernetes.core.k8s_info: 55 | kind: pvc 56 | namespace: '{{ image_cleanup_namespace }}' 57 | register: pvc_cleanup_list 58 | 59 | - name: Remove stray datasource PVCs 60 | kubernetes.core.k8s: 61 | kind: pvc 62 | namespace: '{{ image_cleanup_namespace }}' 63 | name: '{{ item.metadata.name }}' 64 | state: absent 65 | loop: "{{ pvc_cleanup_list.resources }}" 66 | when: 67 | - item.spec.storageClassName != default_virt_sc.metadata.name 68 | 69 | - name: Check for stuck datavolumes 70 | kubernetes.core.k8s_info: 71 | namespace: '{{ dv_namespace }}' 72 | kind: DataVolume 73 | api_version: cdi.kubevirt.io/v1beta1 74 | register: vm_ds 75 | 76 | - name: Remove stuck datavolume if 
needed 77 | kubernetes.core.k8s: 78 | name: "{{ item.metadata.name }}" 79 | namespace: "{{ item.metadata.namespace }}" 80 | kind: "{{ item.kind }}" 81 | api_version: "{{ item.apiVersion }}" 82 | state: absent 83 | when: 84 | - item.status.phase in dv_remove_status 85 | - (now(utc=true) - (item.metadata.creationTimestamp|to_datetime(ts_fmt))).total_seconds() >= dv_remove_timeout 86 | loop: '{{ vm_ds.resources }}' 87 | 88 | rescue: 89 | - name: Note that we exited 90 | ansible.builtin.debug: 91 | msg: "Caught an error while cleaning up PVCs, exiting" 92 | -------------------------------------------------------------------------------- /ansible/odf_fix_dataimportcrons.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | # This workaround was implemented to fix a problem where openshift-cnv would 4 | # not recognize a default virt storage class change and change the format of 5 | # datasources. The issue was fixed in OpenShift Virtualization 4.16.4. 6 | - name: Determine if we have PVC clean-up to do 7 | become: false 8 | connection: local 9 | hosts: localhost 10 | gather_facts: false 11 | vars: 12 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 13 | dataimportcron_cleanup: false 14 | image_cleanup_namespace: "openshift-virtualization-os-images" 15 | cluster_version: "{{ global['clusterVersion'] | default('UNSET') }}" 16 | tasks: 17 | - name: Check cluster version 18 | ansible.builtin.debug: 19 | var: cluster_version 20 | 21 | - name: End play if the cluster version does not require this workaround 22 | ansible.builtin.meta: end_play 23 | when: cluster_version not in [ '4.17', '4.16', 'UNSET' ] 24 | 25 | - name: Find default storageclass 26 | ansible.builtin.shell: | 27 | set -o pipefail 28 | oc get storageclass -o json | jq -r '.items[] | select(.metadata.annotations."storageclass.kubernetes.io/is-default-class")' 29 | register: default_sc_output 30 | changed_when: false 31 | 32 | - name: Find virtualization default storageclass 33 | ansible.builtin.shell: | 34 | set -o pipefail 35 | oc get storageclass -o json | jq -r '.items[] | select(.metadata.annotations."storageclass.kubevirt.io/is-default-virt-class")' 36 | register: default_virt_sc_output 37 | changed_when: false 38 | 39 | - name: Compare default virtualization storageclass and default storageclass to determine whether to clean PVCs 40 | block: 41 | - name: Parse results 42 | ansible.builtin.set_fact: 43 | default_sc: '{{ default_sc_output.stdout | from_json }}' 44 | default_virt_sc: '{{ default_virt_sc_output.stdout | from_json }}' 45 | 46 | - name: Commit to dataimportcron cleanup 47 | ansible.builtin.set_fact: 48 | dataimportcron_cleanup: true 49 | when: 50 | - default_virt_sc.metadata.name == "ocs-storagecluster-ceph-rbd-virtualization" 51 | - default_sc.metadata.name != default_virt_sc.metadata.name 52 | rescue: 53 | - name: Note that we exited 54 | ansible.builtin.debug: 55 | msg: "Caught an error before we could determine to clean up dataimportcrons, exiting" 56 | 57 | - name: End play (successfully) 58 | ansible.builtin.meta: end_play 59 | 60 | - name: Cleanup incorrect datasourceimport images (PVCs) 61 | when: 62 | - dataimportcron_cleanup 63 | block: 64 | - name: Find dataimportcrons 65 | kubernetes.core.k8s_info: 66 | kind: dataimportcron 67 | namespace: '{{ image_cleanup_namespace }}' 68 | api_version: cdi.kubevirt.io/v1beta1 69 | register: dic_list 70 | 71 | - name: Extract dic names 72 | ansible.builtin.set_fact: 73 | dic_names: "{{ dic_names | default([]) + [
item.metadata.name ] }}" 74 | loop: "{{ dic_list.resources }}" 75 | 76 | - name: Show names 77 | ansible.builtin.debug: 78 | var: dic_names 79 | 80 | - name: Find datasources to cleanup 81 | kubernetes.core.k8s_info: 82 | kind: datasource 83 | namespace: '{{ image_cleanup_namespace }}' 84 | api_version: cdi.kubevirt.io/v1beta1 85 | register: ds_cleanup_list 86 | 87 | - name: Keep track of objects to remove 88 | ansible.builtin.set_fact: 89 | cron_cleanups: [] 90 | ds_cleanups: [] 91 | 92 | - name: Record datasources that need cleanup 93 | ansible.builtin.set_fact: 94 | cron_cleanups: "{{ cron_cleanups + [ item.metadata.labels['cdi.kubevirt.io/dataImportCron'] ] }}" 95 | ds_cleanups: "{{ ds_cleanups + [ item.metadata.name ] }}" 96 | loop: "{{ ds_cleanup_list.resources }}" 97 | when: 98 | - item['metadata']['labels']['cdi.kubevirt.io/dataImportCron'] is defined 99 | - item['metadata']['labels']['cdi.kubevirt.io/dataImportCron'] in dic_names 100 | - item.status.conditions[0].message != "DataSource is ready to be consumed" 101 | 102 | - name: Check on removables 103 | ansible.builtin.debug: 104 | msg: 105 | - "cron_cleanups: {{ cron_cleanups }}" 106 | - "ds_cleanups: {{ ds_cleanups }}" 107 | 108 | - name: Delete datasources in cleanup list 109 | kubernetes.core.k8s: 110 | kind: datasource 111 | namespace: '{{ image_cleanup_namespace }}' 112 | api_version: cdi.kubevirt.io/v1beta1 113 | name: "{{ item }}" 114 | state: absent 115 | loop: "{{ ds_cleanups }}" 116 | 117 | - name: Delete datavolumes in cleanup list 118 | kubernetes.core.k8s: 119 | kind: datavolume 120 | namespace: '{{ image_cleanup_namespace }}' 121 | api_version: cdi.kubevirt.io/v1beta1 122 | label_selectors: 123 | - 'cdi.kubevirt.io/dataImportCron={{ item }}' 124 | state: absent 125 | loop: "{{ cron_cleanups }}" 126 | 127 | - name: Delete dataimportcrons in cleanup list 128 | kubernetes.core.k8s: 129 | kind: dataimportcron 130 | namespace: '{{ image_cleanup_namespace }}' 131 | api_version: cdi.kubevirt.io/v1beta1 132 | name: "{{ item }}" 133 | state: absent 134 | loop: "{{ cron_cleanups }}" 135 | rescue: 136 | - name: Note that we exited 137 | ansible.builtin.debug: 138 | msg: "Caught an error while cleaning up dataimportcrons, exiting" 139 | -------------------------------------------------------------------------------- /common/.ansible-lint: -------------------------------------------------------------------------------- 1 | # Vim filetype=yaml 2 | --- 3 | offline: false 4 | skip_list: 5 | - name[template] # Allow Jinja templating inside task and play names 6 | - template-instead-of-copy # Templated files should use template instead of copy 7 | - yaml[line-length] # too long lines 8 | - yaml[indentation] # Forcing lists to be always indented by 2 chars is silly IMO 9 | - var-naming[no-role-prefix] # This would be too much churn for very little gain 10 | - no-changed-when 11 | 12 | # ansible-lint gh workflow cannot find ansible.cfg hence fails to import vault_utils role 13 | exclude_paths: 14 | - ./ansible/playbooks/vault/vault.yaml 15 | - ./ansible/playbooks/iib-ci/iib-ci.yaml 16 | - ./ansible/playbooks/k8s_secrets/k8s_secrets.yml 17 | - ./ansible/playbooks/process_secrets/process_secrets.yml 18 | - ./ansible/playbooks/write-token-kubeconfig/write-token-kubeconfig.yml 19 | - ./ansible/playbooks/process_secrets/display_secrets_info.yml 20 | - ./ansible/roles/vault_utils/tests/test.yml 21 |
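# A hypothetical local run using this config (assuming ansible-lint is installed): ansible-lint -c common/.ansible-lint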
-------------------------------------------------------------------------------- /common/.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | # Check for updates to GitHub Actions every week 5 | - package-ecosystem: "github-actions" 6 | directory: "/" 7 | schedule: 8 | interval: "weekly" 9 | 10 | -------------------------------------------------------------------------------- /common/.github/linters/.gitleaks.toml: -------------------------------------------------------------------------------- 1 | [whitelist] 2 | # As of v4, gitleaks only matches against filename, not path in the 3 | # files directive. Leaving content for backwards compatibility. 4 | files = [ ] 5 | -------------------------------------------------------------------------------- /common/.github/linters/.markdown-lint.yml: -------------------------------------------------------------------------------- 1 | { 2 | "default": true, 3 | "MD003": false, 4 | "MD013": false, 5 | "MD033": false 6 | } -------------------------------------------------------------------------------- /common/.github/workflows/pattern-sh-ci.yml: -------------------------------------------------------------------------------- 1 | name: Run Bash Script on Multiple Distributions 2 | 3 | on: 4 | push: 5 | paths: 6 | - "scripts/**" 7 | - "Makefile" 8 | branches: 9 | - main 10 | pull_request: 11 | paths: 12 | - "scripts/**" 13 | - "Makefile" 14 | 15 | jobs: 16 | run-script: 17 | name: Run Bash Script 18 | strategy: 19 | matrix: 20 | # Fedora is not an option yet 21 | os: [ubuntu-latest, ubuntu-22.04] 22 | runs-on: ${{ matrix.os }} 23 | 24 | steps: 25 | - name: Checkout Repository 26 | uses: actions/checkout@v4 27 | 28 | - name: Install Podman on Ubuntu 29 | if: contains(matrix.os, 'ubuntu') 30 | run: | 31 | sudo apt-get update 32 | sudo apt-get install -y podman 33 | 34 | # Currently we do not test on macOS as the runners are not free; maybe in the future 35 | # - name: Install Podman on macOS 36 | # if: contains(matrix.os, 'macos') 37 | # run: | 38 | # brew install podman 39 | # podman machine init 40 | # podman machine start 41 | 42 | - name: Verify Podman Installation 43 | run: podman --version 44 | 45 | - name: Run pattern-util.sh script 46 | run: | 47 | export TARGET_BRANCH=main 48 | ./scripts/pattern-util.sh make validate-origin 49 | -------------------------------------------------------------------------------- /common/.github/workflows/superlinter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Super linter 3 | 4 | on: [push, pull_request] 5 | 6 | jobs: 7 | build: 8 | # Name the Job 9 | name: Super linter 10 | # Set the agent to run on 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout Code 15 | uses: actions/checkout@v4 16 | with: 17 | # Full git history is needed to get a proper list of changed files within `super-linter` 18 | fetch-depth: 0 19 | 20 | ################################ 21 | # Run Linter against code base # 22 | ################################ 23 | - name: Lint Code Base 24 | uses: super-linter/super-linter/slim@v7 25 | env: 26 | VALIDATE_ALL_CODEBASE: true 27 | DEFAULT_BRANCH: main 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | # These are the validations we disable atm 30 | VALIDATE_ANSIBLE: false 31 | VALIDATE_BASH: false 32 | VALIDATE_CHECKOV: false 33 | VALIDATE_JSCPD: false 34 | VALIDATE_JSON_PRETTIER: false 35 | VALIDATE_MARKDOWN_PRETTIER: false 36 |
VALIDATE_KUBERNETES_KUBECONFORM: false 37 | VALIDATE_PYTHON_PYLINT: false 38 | VALIDATE_SHELL_SHFMT: false 39 | VALIDATE_YAML: false 40 | VALIDATE_YAML_PRETTIER: false 41 | # VALIDATE_DOCKERFILE_HADOLINT: false 42 | # VALIDATE_MARKDOWN: false 43 | # VALIDATE_NATURAL_LANGUAGE: false 44 | # VALIDATE_TEKTON: false 45 | -------------------------------------------------------------------------------- /common/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.py[cod] 3 | *~ 4 | *.swp 5 | *.swo 6 | values-secret.yaml 7 | .*.expected.yaml 8 | .vscode 9 | pattern-vault.init 10 | pattern-vault.init.bak 11 | super-linter.log 12 | golang-external-secrets/Chart.lock 13 | hashicorp-vault/Chart.lock 14 | -------------------------------------------------------------------------------- /common/.gitleaks.toml: -------------------------------------------------------------------------------- 1 | .github/linters/.gitleaks.toml -------------------------------------------------------------------------------- /common/Changes.md: -------------------------------------------------------------------------------- 1 | # Changes 2 | 3 | ## Sep 24, 2024 4 | 5 | * Ansible has been moved out of the common code tree; you must use a clustergroup chart that is >= 0.9.1 6 | 7 | ## Sep 6, 2024 8 | 9 | * Most charts have been removed from the tree. To get the charts you now have to point to their external chart repositories 10 | 11 | ## Sep 25, 2023 12 | 13 | * Upgraded ESO to v0.9.5 14 | 15 | ## Aug 17, 2023 16 | 17 | * Introduced support for multisource applications via .chart + .chartVersion 18 | 19 | ## Jul 8, 2023 20 | 21 | * Introduced a default of 20 for sync failure retries in argo applications (global override via global.options.applicationRetryLimit 22 | and per-app override via .syncPolicy) 23 | 24 | ## May 22, 2023 25 | 26 | * Upgraded ESO to 0.8.2 27 | * *Important* we now use the newly blessed sso config for argo. This means that gitops < 1.8 is *unsupported* 28 | 29 | ## May 18, 2023 30 | 31 | * Introduced an EXTRA_HELM_OPTS env variable that will be passed to the helm invocations 32 | 33 | ## April 21, 2023 34 | 35 | * Added labels and annotation support to namespaces.yaml template 36 | 37 | ## Apr 11, 2023 38 | 39 | * Apply the ACM ocp-gitops-policy everywhere but the hub 40 | 41 | ## Apr 7, 2023 42 | 43 | * Moved to gitops-1.8 channel by default (stable is unmaintained and will be dropped starting with ocp-4.13) 44 | 45 | ## March 20, 2023 46 | 47 | * Upgraded ESO to 0.8.1 48 | 49 | ## February 9, 2023 50 | 51 | * Add support for /values-<platform>.yaml and for /values-<platform>-<clusterversion>.yaml 52 | 53 | ## January 29, 2023 54 | 55 | * Stop extracting the HUB's CA via an imperative job running on the imported cluster. 56 | Just use ACM to push the HUB's CA out to the managed clusters. 57 | 58 | ## January 23, 2023 59 | 60 | * Add initial support for running ESO on ACM-imported clusters 61 | 62 | ## January 18, 2023 63 | 64 | * Add validate-schema target 65 | 66 | ## January 13, 2023 67 | 68 | * Simplify the secrets paths when using argo hosted sites 69 | 70 | ## January 10, 2023 71 | 72 | * vaultPrefixes is now optional in the v2 secret spec and defaults to ["hub"] 73 | 74 | ## December 9, 2022 75 | 76 | * Dropped insecureUnsealVaultInsideCluster (and file_unseal) entirely. Now 77 | vault is always unsealed via a cronjob in the cluster. It is recommended to 78 | store the imperative/vaultkeys secret offline securely and then delete it.
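  A minimal sketch of that recommendation (the `vaultkeys` secret in the `imperative` namespace comes from the entry above; the backup destination is a placeholder):

```sh
# Save the unseal keys somewhere secure outside the cluster...
oc extract secret/vaultkeys -n imperative --to=/path/to/secure/offline/storage
# ...then remove them from the cluster
oc delete secret vaultkeys -n imperative
```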
79 | 80 | ## December 8, 2022 81 | 82 | * Removed the legacy installation targets: 83 | `deploy upgrade legacy-deploy legacy-upgrade`. 84 | Patterns must now use the operator-based installation. 85 | 86 | ## November 29, 2022 87 | 88 | * Upgraded vault-helm to 0.23.0 89 | * Enable vault-ssl by default 90 | 91 | ## November 22, 2022 92 | 93 | * Implemented a new format for the values-secret.yaml. Example can be found in examples/ folder 94 | * Now the order of values-secret file lookup is the following: 95 | 1. ~/values-secret-<pattern_name>.yaml 96 | 2. ~/values-secret.yaml 97 | 3. <pattern_dir>/values-secret.yaml.template 98 | * Add support for ansible-vault encrypted values-secret files. You can now encrypt your values-secret file 99 | at rest with `ansible-vault encrypt ~/values-secret.yaml`. When running `make load-secrets`, if an encrypted 100 | file is encountered the user will be prompted automatically for the password to decrypt it. 101 | 102 | ## November 6, 2022 103 | 104 | * Add support for /values-<platform>-<clustergroup>.yaml (e.g. /values-AWS-group-one.yaml) 105 | 106 | ## October 28, 2022 107 | 108 | * Updated vault helm chart to v0.22.1 and vault containers to 1.12.0 109 | 110 | ## October 25, 2022 111 | 112 | * Updated External Secrets Operator to v0.6.0 113 | * Moved to -UBI based ESO containers 114 | 115 | ## October 13, 2022 116 | 117 | * Added global.clusterVersion as a new helm variable which represents the OCP 118 | Major.Minor cluster version. By default now a user can add a 119 | values-<clusterversion>-<clustergroup>.yaml file to have specific cluster version 120 | overrides (e.g. values-4.10-hub.yaml). Will need Validated Patterns Operator >= 0.0.6 121 | when deploying with the operator. Note: When using the ArgoCD Hub and spoke model, 122 | you cannot have spokes with a different version of OCP than the hub. 123 | 124 | ## October 4, 2022 125 | 126 | * Extended the values-secret.yaml file to support multiple vault paths and re-wrote 127 | the push_secrets feature as a python module plugin. This requires the following line 128 | in a pattern's ansible.cfg's '[defaults]' stanza: 129 | 130 | `library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules` 131 | 132 | ## October 3, 2022 133 | 134 | * Restore the ability to install a non-default site: `make TARGET_SITE=mysite install` 135 | * Revised tests (new output and filenames, requires adding new result files to Git) 136 | * ACM 2.6 required for ACM-based managed sites 137 | * Introduced global.clusterDomain template variable (without the `apps.` prefix) 138 | * Removed the ability to send specific charts to another cluster, use hosted argo sites instead 139 | * Added the ability to have the hub host `values-{site}.yaml` for spoke clusters. 140 | 141 | The following example would deploy the namespaces, subscriptions, and 142 | applications defined in `values-group-one.yaml` to the `perth` cluster 143 | directly from ArgoCD on the hub.
144 | 145 | ```yaml 146 | managedClusterGroups: 147 | - name: group-one 148 | hostedArgoSites: 149 | - name: perth 150 | domain: perth1.beekhof.net 151 | bearerKeyPath: secret/data/hub/cluster_perth 152 | caKeyPath: secret/data/hub/cluster_perth_ca 153 | ``` 154 | -------------------------------------------------------------------------------- /common/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /common/Makefile: -------------------------------------------------------------------------------- 1 | NAME ?= $(shell basename "`pwd`") 2 | 3 | ifneq ($(origin TARGET_SITE), undefined) 4 | TARGET_SITE_OPT=--set main.clusterGroupName=$(TARGET_SITE) 5 | endif 6 | 7 | # This variable can be set in order to pass additional helm arguments from 8 | # the command line. I.e.
we can set things without having to tweak values files 9 | EXTRA_HELM_OPTS ?= 10 | 11 | # This variable can be set in order to pass additional ansible-playbook arguments from 12 | # the command line. I.e. we can set -vvv for more verbose logging 13 | EXTRA_PLAYBOOK_OPTS ?= 14 | 15 | # INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248 16 | # or 17 | # INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248,registry-proxy.engineering.redhat.com/rh-osbs/iib:394249 18 | INDEX_IMAGES ?= 19 | 20 | TARGET_ORIGIN ?= origin 21 | # This is to ensure that whether we start with a git@ or https:// URL, we end up with an https:// URL 22 | # This is because we expect to use tokens for repo authentication as opposed to SSH keys 23 | TARGET_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN) | sed -e 's/.*URL:[[:space:]]*//' -e 's%^git@%%' -e 's%^https://%%' -e 's%:%/%' -e 's%^%https://%') 24 | # git branch --show-current is also available as of git 2.22, but we will use this for compatibility 25 | TARGET_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) 26 | 27 | UUID_FILE ?= ~/.config/validated-patterns/pattern-uuid 28 | UUID_HELM_OPTS ?= 29 | 30 | # --set values always take precedence over the contents of -f 31 | ifneq ("$(wildcard $(UUID_FILE))","") 32 | UUID := $(shell cat $(UUID_FILE)) 33 | UUID_HELM_OPTS := --set main.analyticsUUID=$(UUID) 34 | endif 35 | 36 | # Set the secret name *and* its namespace when deploying from private repositories 37 | # The format of said secret is documented here: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories 38 | TOKEN_SECRET ?= 39 | TOKEN_NAMESPACE ?= 40 | 41 | ifeq ($(TOKEN_SECRET),) 42 | HELM_OPTS=-f values-global.yaml --set main.git.repoURL="$(TARGET_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) 43 | else 44 | # When we are working with a private repository we do not escape the git URL as it might be using an ssh secret which does not use https:// 45 | TARGET_CLEAN_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN)) 46 | HELM_OPTS=-f values-global.yaml --set main.tokenSecret=$(TOKEN_SECRET) --set main.tokenSecretNamespace=$(TOKEN_NAMESPACE) --set main.git.repoURL="$(TARGET_CLEAN_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) 47 | endif 48 | 49 | # Helm does the right thing and fetches all the tags and detects the newest one 50 | PATTERN_INSTALL_CHART ?= oci://quay.io/hybridcloudpatterns/pattern-install 51 | 52 | ##@ Pattern Common Tasks 53 | 54 | .PHONY: help 55 | help: ## This help message 56 | @echo "Pattern: $(NAME)" 57 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^(\s|[a-zA-Z_0-9-])+:.*?##/ { printf " \033[36m%-35s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 58 | 59 | # Makefiles in the individual patterns should call these targets explicitly 60 | # e.g.
from industrial-edge: make -f common/Makefile show 61 | .PHONY: show 62 | show: ## show the starting template without installing it 63 | helm template $(PATTERN_INSTALL_CHART) --name-template $(NAME) $(HELM_OPTS) 64 | 65 | preview-all: ## (EXPERIMENTAL) Previews all applications on hub and managed clusters 66 | @echo "NOTE: This is just a tentative approximation of rendering all hub and managed clusters templates" 67 | @common/scripts/preview-all.sh $(TARGET_REPO) $(TARGET_BRANCH) 68 | 69 | preview-%: 70 | $(eval CLUSTERGROUP ?= $(shell yq ".main.clusterGroupName" values-global.yaml)) 71 | @common/scripts/preview.sh $(CLUSTERGROUP) $* $(TARGET_REPO) $(TARGET_BRANCH) 72 | 73 | .PHONY: operator-deploy 74 | operator-deploy operator-upgrade: validate-prereq validate-origin validate-cluster ## runs helm install 75 | @common/scripts/deploy-pattern.sh $(NAME) $(PATTERN_INSTALL_CHART) $(HELM_OPTS) 76 | 77 | .PHONY: uninstall 78 | uninstall: ## runs helm uninstall 79 | $(eval CSV := $(shell oc get subscriptions -n openshift-operators openshift-gitops-operator -ojsonpath={.status.currentCSV})) 80 | helm uninstall $(NAME) 81 | @oc delete csv -n openshift-operators $(CSV) 82 | 83 | .PHONY: load-secrets 84 | load-secrets: ## loads the secrets into the backend determined by values-global setting 85 | common/scripts/process-secrets.sh $(NAME) 86 | 87 | .PHONY: legacy-load-secrets 88 | legacy-load-secrets: ## loads the secrets into vault (only) 89 | common/scripts/vault-utils.sh push_secrets $(NAME) 90 | 91 | .PHONY: secrets-backend-vault 92 | secrets-backend-vault: ## Edits values files to use default Vault+ESO secrets config 93 | common/scripts/set-secret-backend.sh vault 94 | common/scripts/manage-secret-app.sh vault present 95 | common/scripts/manage-secret-app.sh golang-external-secrets present 96 | common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent 97 | @git diff --exit-code || echo "Secrets backend set to vault, please review changes, commit, and push to activate in the pattern" 98 | 99 | .PHONY: secrets-backend-kubernetes 100 | secrets-backend-kubernetes: ## Edits values file to use Kubernetes+ESO secrets config 101 | common/scripts/set-secret-backend.sh kubernetes 102 | common/scripts/manage-secret-namespace.sh validated-patterns-secrets present 103 | common/scripts/manage-secret-app.sh vault absent 104 | common/scripts/manage-secret-app.sh golang-external-secrets present 105 | @git diff --exit-code || echo "Secrets backend set to kubernetes, please review changes, commit, and push to activate in the pattern" 106 | 107 | .PHONY: secrets-backend-none 108 | secrets-backend-none: ## Edits values files to remove secrets manager + ESO 109 | common/scripts/set-secret-backend.sh none 110 | common/scripts/manage-secret-app.sh vault absent 111 | common/scripts/manage-secret-app.sh golang-external-secrets absent 112 | common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent 113 | @git diff --exit-code || echo "Secrets backend set to none, please review changes, commit, and push to activate in the pattern" 114 | 115 | .PHONY: load-iib 116 | load-iib: ## CI target to install Index Image Bundles 117 | @set -e; if [ x$(INDEX_IMAGES) != x ]; then \ 118 | ansible-playbook $(EXTRA_PLAYBOOK_OPTS) rhvp.cluster_utils.iib_ci; \ 119 | else \ 120 | echo "No INDEX_IMAGES defined. 
Bailing out"; \ 121 | exit 1; \ 122 | fi 123 | 124 | .PHONY: token-kubeconfig 125 | token-kubeconfig: ## Create a local ~/.kube/config with password (not usually needed) 126 | common/scripts/write-token-kubeconfig.sh 127 | 128 | ##@ Validation Tasks 129 | 130 | # We only check the remote ssh git branch's existance if we're not running inside a container 131 | # as getting ssh auth working inside a container seems a bit brittle 132 | # If the main repoUpstreamURL field is set, then we need to check against 133 | # that and not target_repo 134 | .PHONY: validate-origin 135 | validate-origin: ## verify the git origin is available 136 | @echo "Checking repository:" 137 | $(eval UPSTREAMURL := $(shell yq -r '.main.git.repoUpstreamURL // (.main.git.repoUpstreamURL = "")' values-global.yaml)) 138 | @if [ -z "$(UPSTREAMURL)" ]; then\ 139 | echo -n " $(TARGET_REPO) - branch '$(TARGET_BRANCH)': ";\ 140 | git ls-remote --exit-code --heads $(TARGET_REPO) $(TARGET_BRANCH) >/dev/null &&\ 141 | echo "OK" || (echo "NOT FOUND"; exit 1);\ 142 | else\ 143 | echo "Upstream URL set to: $(UPSTREAMURL)";\ 144 | echo -n " $(UPSTREAMURL) - branch '$(TARGET_BRANCH)': ";\ 145 | git ls-remote --exit-code --heads $(UPSTREAMURL) $(TARGET_BRANCH) >/dev/null &&\ 146 | echo "OK" || (echo "NOT FOUND"; exit 1);\ 147 | fi 148 | 149 | .PHONY: validate-cluster 150 | validate-cluster: ## Do some cluster validations before installing 151 | @echo "Checking cluster:" 152 | @echo -n " cluster-info: " 153 | @oc cluster-info >/dev/null && echo "OK" || (echo "Error"; exit 1) 154 | @echo -n " storageclass: " 155 | @if [ `oc get storageclass -o go-template='{{printf "%d\n" (len .items)}}'` -eq 0 ]; then\ 156 | echo "WARNING: No storageclass found";\ 157 | else\ 158 | echo "OK";\ 159 | fi 160 | 161 | 162 | .PHONY: validate-schema 163 | validate-schema: ## validates values files against schema in common/clustergroup 164 | $(eval VAL_PARAMS := $(shell for i in ./values-*.yaml; do echo -n "$${i} "; done)) 165 | @echo -n "Validating clustergroup schema of: " 166 | @set -e; for i in $(VAL_PARAMS); do echo -n " $$i"; helm template oci://quay.io/hybridcloudpatterns/clustergroup $(HELM_OPTS) -f "$${i}" >/dev/null; done 167 | @echo 168 | 169 | .PHONY: validate-prereq 170 | validate-prereq: ## verify pre-requisites 171 | $(eval GLOBAL_PATTERN := $(shell yq -r .global.pattern values-global.yaml)) 172 | @if [ $(NAME) != $(GLOBAL_PATTERN) ]; then\ 173 | echo "";\ 174 | echo "WARNING: folder directory is \"$(NAME)\" and global.pattern is set to \"$(GLOBAL_PATTERN)\"";\ 175 | echo "this can create problems. Please make sure they are the same!";\ 176 | echo "";\ 177 | fi 178 | @if [ ! -f /run/.containerenv ]; then\ 179 | echo "Checking prerequisites:";\ 180 | echo -n " Check for python-kubernetes: ";\ 181 | if ! ansible -m ansible.builtin.command -a "{{ ansible_python_interpreter }} -c 'import kubernetes'" localhost > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ 182 | echo "OK";\ 183 | echo -n " Check for kubernetes.core collection: ";\ 184 | if ! 
ansible-galaxy collection list | grep kubernetes.core > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ 185 | 		echo "OK";\ 186 | 	else\ 187 | 		if [ -f values-global.yaml ]; then\ 188 | 			OUT=`yq -r '.main.multiSourceConfig.enabled // (.main.multiSourceConfig.enabled = "false")' values-global.yaml`;\ 189 | 			if [ "$${OUT,,}" = "false" ]; then\ 190 | 				echo "You must set \".main.multiSourceConfig.enabled: true\" in your 'values-global.yaml' file";\ 191 | 				echo "because your common subfolder is the slimmed down version with no helm charts in it";\ 192 | 				exit 1;\ 193 | 			fi;\ 194 | 		fi;\ 195 | 	fi 196 | 197 | .PHONY: argo-healthcheck 198 | argo-healthcheck: ## Checks if all argo applications are synced 199 | 	@echo "Checking argo applications" 200 | 	$(eval APPS := $(shell oc get applications.argoproj.io -A -o jsonpath='{range .items[*]}{@.metadata.namespace}{","}{@.metadata.name}{"\n"}{end}')) 201 | 	@NOTOK=0; \ 202 | 	for i in $(APPS); do\ 203 | 		n=`echo "$${i}" | cut -f1 -d,`;\ 204 | 		a=`echo "$${i}" | cut -f2 -d,`;\ 205 | 		STATUS=`oc get -n "$${n}" applications.argoproj.io/"$${a}" -o jsonpath='{.status.sync.status}'`;\ 206 | 		if [[ $$STATUS != "Synced" ]]; then\ 207 | 			NOTOK=$$(( $${NOTOK} + 1));\ 208 | 		fi;\ 209 | 		HEALTH=`oc get -n "$${n}" applications.argoproj.io/"$${a}" -o jsonpath='{.status.health.status}'`;\ 210 | 		if [[ $$HEALTH != "Healthy" ]]; then\ 211 | 			NOTOK=$$(( $${NOTOK} + 1));\ 212 | 		fi;\ 213 | 		echo "$${n} $${a} -> Sync: $${STATUS} - Health: $${HEALTH}";\ 214 | 	done;\ 215 | 	if [ $${NOTOK} -gt 0 ]; then\ 216 | 		echo "Some applications are not synced or are unhealthy";\ 217 | 		exit 1;\ 218 | 	fi 219 | 220 | 221 | ##@ Test and Linters Tasks 222 | 223 | .PHONY: qe-tests 224 | qe-tests: ## Runs the tests that QE runs 225 | 	@set -e; if [ -f ./tests/interop/run_tests.sh ]; then \ 226 | 		pushd ./tests/interop; ./run_tests.sh; popd; \ 227 | 	else \ 228 | 		echo "No ./tests/interop/run_tests.sh found, skipping"; \ 229 | 	fi 230 | 231 | .PHONY: super-linter 232 | super-linter: ## Runs super linter locally 233 | 	rm -rf .mypy_cache 234 | 	podman run -e RUN_LOCAL=true -e USE_FIND_ALGORITHM=true \ 235 | 					-e VALIDATE_ANSIBLE=false \ 236 | 					-e VALIDATE_BASH=false \ 237 | 					-e VALIDATE_CHECKOV=false \ 238 | 					-e VALIDATE_DOCKERFILE_HADOLINT=false \ 239 | 					-e VALIDATE_JSCPD=false \ 240 | 					-e VALIDATE_JSON_PRETTIER=false \ 241 | 					-e VALIDATE_MARKDOWN_PRETTIER=false \ 242 | 					-e VALIDATE_KUBERNETES_KUBECONFORM=false \ 243 | 					-e VALIDATE_PYTHON_PYLINT=false \ 244 | 					-e VALIDATE_SHELL_SHFMT=false \ 245 | 					-e VALIDATE_TEKTON=false \ 246 | 					-e VALIDATE_YAML=false \ 247 | 					-e VALIDATE_YAML_PRETTIER=false \ 248 | 					$(DISABLE_LINTERS) \ 249 | 					-v $(PWD):/tmp/lint:rw,z \ 250 | 					-w /tmp/lint \ 251 | 					ghcr.io/super-linter/super-linter:slim-v7 252 | 253 | .PHONY: deploy upgrade legacy-deploy legacy-upgrade 254 | deploy upgrade legacy-deploy legacy-upgrade: 255 | 	@echo "UNSUPPORTED TARGET: please switch to 'operator-deploy'"; exit 1 256 | -------------------------------------------------------------------------------- /common/README.md: -------------------------------------------------------------------------------- 1 | # Validated Patterns common/ repository 2 | 3 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) 4 | 5 | ## Note 6 | 7 | This is the `main` branch of common and it assumes that the pattern is fully 8 | multisource (meaning that any charts used from VP are actually referenced from 9 | either a helm chart repository or quay repository). I.e.
there are no helm 10 | charts contained in this branch of common and there is no ansible code either. 11 | 12 | The helm charts now live in separate repositories under the VP 13 | [organization](https://github.com/validatedpatterns) on GitHub. The repositories are: 14 | 15 | - clustergroup-chart 16 | - pattern-install-chart 17 | - hashicorp-vault-chart 18 | - golang-external-secrets-chart 19 | - acm-chart 20 | - letsencrypt-chart 21 | 22 | The ansible bits live in this [repository](https://github.com/validatedpatterns/rhvp.cluster_utils) 23 | 24 | In order to be able to use this "slimmed-down" main branch of common you *must* 25 | use a 0.9.* clustergroup-chart. Add the following to your `values-global.yaml`: 26 | 27 | ```yaml 28 | main: 29 |   multiSourceConfig: 30 |     enabled: true 31 |     clusterGroupChartVersion: 0.9.* 32 | ``` 33 | 34 | ## Start Here 35 | 36 | This repository is never used standalone. It is usually imported in each pattern as a subtree. 37 | In order to import the common subtree the very first time you can use the script 38 | [make_common_subtree.sh](scripts/make-common-subtree.sh). 39 | 40 | In order to update your common subtree inside your pattern repository you can either use 41 | `https://github.com/validatedpatterns/utilities/blob/main/scripts/update-common-everywhere.sh` or 42 | do it manually with the following commands: 43 | 44 | ```sh 45 | git remote add -f common-upstream https://github.com/validatedpatterns/common.git 46 | git merge -s subtree -Xtheirs -Xsubtree=common common-upstream/main 47 | ``` 48 | 49 | ## Secrets 50 | 51 | There are two different secret formats parsed by the ansible bits. Both are documented [here](https://github.com/validatedpatterns/common/tree/main/ansible/roles/vault_utils/README.md) 52 | -------------------------------------------------------------------------------- /common/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Define Ansible collection requirements here 3 | collections: 4 | - name: git+https://github.com/validatedpatterns/rhvp.cluster_utils.git,v1 5 | -------------------------------------------------------------------------------- /common/scripts/deploy-pattern.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o pipefail 3 | 4 | RUNS=10 5 | WAIT=15 6 | # Retry up to ${RUNS} times because the CRD might not be fully installed yet 7 | echo -n "Installing pattern: " 8 | for i in $(seq 1 ${RUNS}); do \ 9 |     exec 3>&1 4>&2 10 |     OUT=$( { helm template --include-crds --name-template $* 2>&4 | oc apply -f- 2>&4 1>&3; } 4>&1 3>&1) 11 |     ret=$? 12 |     exec 3>&- 4>&- 13 |     if [ ${ret} -eq 0 ]; then 14 |         break; 15 |     else 16 |         echo -n "." 17 |         sleep "${WAIT}" 18 |     fi 19 | done 20 | 21 | # The last attempt also failed 22 | if [ ${ret} -ne 0 ]; then 23 |     echo "Installation failed [${i}/${RUNS}]. Error:" 24 |     echo "${OUT}" 25 |     exit 1 26 | fi 27 | echo "Done" 28 | -------------------------------------------------------------------------------- /common/scripts/determine-main-clustergroup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PATTERN_DIR="$1" 4 | 5 | if [ -z "$PATTERN_DIR" ]; then 6 |     PATTERN_DIR="."
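    # (no pattern directory argument was given, so we fall back to the current directory)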
7 | fi 8 | 9 | CGNAME=$(yq '.main.clusterGroupName' "$PATTERN_DIR/values-global.yaml") 10 | 11 | if [ -z "$CGNAME" ] || [ "$CGNAME" == "null" ]; then 12 |     echo "Error - cannot determine clusterGroupName" 13 |     exit 1 14 | fi 15 | 16 | echo "$CGNAME" 17 | -------------------------------------------------------------------------------- /common/scripts/determine-pattern-name.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | PATTERN_DIR="$1" 4 | 5 | if [ -z "$PATTERN_DIR" ]; then 6 |     PATTERN_DIR="." 7 | fi 8 | 9 | PATNAME=$(yq '.global.pattern' "$PATTERN_DIR/values-global.yaml" 2>/dev/null) 10 | 11 | if [ -z "$PATNAME" ] || [ "$PATNAME" == "null" ]; then 12 |     PATNAME="$(basename "$PWD")" 13 | fi 14 | 15 | echo "$PATNAME" 16 | -------------------------------------------------------------------------------- /common/scripts/determine-secretstore-backend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | PATTERN_DIR="$1" 4 | 5 | if [ -z "$PATTERN_DIR" ]; then 6 |     PATTERN_DIR="." 7 | fi 8 | 9 | BACKEND=$(yq '.global.secretStore.backend' "$PATTERN_DIR/values-global.yaml" 2>/dev/null) 10 | 11 | if [ -z "$BACKEND" ] || [ "$BACKEND" == "null" ]; then 12 |     BACKEND="vault" 13 | fi 14 | 15 | echo "$BACKEND" 16 | -------------------------------------------------------------------------------- /common/scripts/display-secrets-info.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu 3 | 4 | get_abs_filename() { 5 |   # $1 : relative filename 6 |   echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" 7 | } 8 | 9 | SCRIPT=$(get_abs_filename "$0") 10 | SCRIPTPATH=$(dirname "${SCRIPT}") 11 | COMMONPATH=$(dirname "${SCRIPTPATH}") 12 | PATTERNPATH=$(dirname "${COMMONPATH}") 13 | 14 | if [ "$#" -ge 1 ]; then 15 |     export VALUES_SECRET=$(get_abs_filename "${1}") 16 | fi 17 | 18 | if [[ "$#" == 2 ]]; then 19 |     SECRETS_BACKING_STORE="$2" 20 | else 21 |     SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)" 22 | fi 23 | 24 | PATTERN_NAME=$(basename "`pwd`") 25 | 26 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}" 27 | 28 | ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" -e hide_sensitive_output=false ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.display_secrets_info" 29 | -------------------------------------------------------------------------------- /common/scripts/load-k8s-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu 3 | 4 | get_abs_filename() { 5 |   # $1 : relative filename 6 |   echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" 7 | } 8 | 9 | SCRIPT=$(get_abs_filename "$0") 10 | SCRIPTPATH=$(dirname "${SCRIPT}") 11 | COMMONPATH=$(dirname "${SCRIPTPATH}") 12 | PATTERNPATH=$(dirname "${COMMONPATH}") 13 | 14 | PATTERN_NAME=${1:-$(basename "`pwd`")} 15 | 16 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}" 17 | 18 | ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.k8s_secrets" 19 | -------------------------------------------------------------------------------- /common/scripts/make-common-subtree.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "$1" = "-h" ]; then 4 |     echo "This script will convert common into a subtree and add a
remote to help manage it." 5 |     echo "The script takes three positional arguments, as follows:" 6 |     echo 7 |     echo "$0 <subtree_repo> <subtree_branch> <subtree_remote>" 8 |     echo 9 |     echo "Run without arguments, the script would run as if these arguments had been passed:" 10 |     echo "$0 https://github.com/validatedpatterns/common.git main common-upstream" 11 |     echo 12 |     echo "Please ensure the git subtree command is available. On RHEL/Fedora, the git subtree command" 13 |     echo "is in a separate package called git-subtree" 14 |     exit 1 15 | fi 16 | 17 | if [ -f '/etc/redhat-release' ]; then 18 |     rpm -qa | grep git-subtree 2>&1 19 |     if [ ! $? = 0 ]; then 20 |         echo "you need to install git-subtree" 21 |         echo "would you like to install it now?" 22 |         select ANS in yes no 23 |         do 24 |             case $ANS in 25 |                 yes) 26 |                     sudo dnf install git-subtree -y 27 |                     break 28 |                     ;; 29 |                 no) 30 |                     exit 31 |                     break 32 |                     ;; 33 |                 *) 34 |                     echo "You must enter yes or no" 35 |                     ;; 36 |             esac 37 |         done 38 |     fi 39 | fi 40 | 41 | if [ "$1" ]; then 42 |     subtree_repo=$1 43 | else 44 |     subtree_repo=https://github.com/validatedpatterns/common.git 45 | fi 46 | 47 | if [ "$2" ]; then 48 |     subtree_branch=$2 49 | else 50 |     subtree_branch=main 51 | fi 52 | 53 | if [ "$3" ]; then 54 |     subtree_remote=$3 55 | else 56 |     subtree_remote=common-upstream 57 | fi 58 | 59 | git diff --quiet || (echo "This script must be run on a clean working tree" && exit 1) 60 | 61 | echo "Changing directory to project root" 62 | cd `git rev-parse --show-toplevel` 63 | 64 | echo "Removing existing common and replacing it with subtree from $subtree_repo $subtree_branch" 65 | rm -rf common 66 | 67 | echo "Committing removal of common" 68 | (git add -A :/ && git commit -m "Removed previous version of common to convert to subtree from $subtree_repo $subtree_branch") || exit 1 69 | 70 | echo "Adding (possibly replacing) subtree remote $subtree_remote" 71 | git remote rm "$subtree_remote" 72 | git remote add -f "$subtree_remote" "$subtree_repo" || exit 1 73 | git subtree add --prefix=common "$subtree_remote" "$subtree_branch" || exit 1 74 | 75 | echo "Complete.
You may now push these results if you are satisfied" 76 | exit 0 77 | -------------------------------------------------------------------------------- /common/scripts/manage-secret-app.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | APP=$1 4 | STATE=$2 5 | 6 | MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml" 7 | MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)" 8 | 9 | case "$APP" in 10 |     "vault") 11 |         APP_NAME="vault" 12 |         NAMESPACE="vault" 13 |         PROJECT="$MAIN_CLUSTERGROUP_PROJECT" 14 |         CHART_NAME="hashicorp-vault" 15 |         CHART_VERSION=0.1.* 16 | 17 |         ;; 18 |     "golang-external-secrets") 19 |         APP_NAME="golang-external-secrets" 20 |         NAMESPACE="golang-external-secrets" 21 |         PROJECT="$MAIN_CLUSTERGROUP_PROJECT" 22 |         CHART_NAME="golang-external-secrets" 23 |         CHART_VERSION=0.1.* 24 | 25 |         ;; 26 |     *) 27 |         echo "Error - cannot manage $APP; can only manage vault and golang-external-secrets" 28 |         exit 1 29 |         ;; 30 | esac 31 | 32 | case "$STATE" in 33 |     "present") 34 |         common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE" 35 | 36 |         RES=$(yq ".clusterGroup.applications[] | select(.chart == \"$CHART_NAME\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null) 37 |         if [ -z "$RES" ]; then 38 |             echo "Application with chart $CHART_NAME not found, adding" 39 |             yq -i ".clusterGroup.applications.$APP_NAME = { \"name\": \"$APP_NAME\", \"namespace\": \"$NAMESPACE\", \"project\": \"$PROJECT\", \"chart\": \"$CHART_NAME\", \"chartVersion\": \"$CHART_VERSION\"}" "$MAIN_CLUSTERGROUP_FILE" 40 |         fi 41 |         ;; 42 |     "absent") 43 |         common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE" 44 |         echo "Removing application with chart $CHART_NAME" 45 |         yq -i "del(.clusterGroup.applications[] | select(.chart == \"$CHART_NAME\"))" "$MAIN_CLUSTERGROUP_FILE" 46 |         ;; 47 |     *) 48 |         echo "$STATE not supported" 49 |         exit 1 50 |         ;; 51 | esac 52 | 53 | exit 0 54 | -------------------------------------------------------------------------------- /common/scripts/manage-secret-namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | NAMESPACE=$1 4 | STATE=$2 5 | 6 | MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml" 7 | MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)" 8 | 9 | case "$STATE" in 10 |     "present") 11 | 12 |         RES=$(yq ".clusterGroup.namespaces[] | select(. == \"$NAMESPACE\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null) 13 |         if [ -z "$RES" ]; then 14 |             echo "Namespace $NAMESPACE not found, adding" 15 |             yq -i ".clusterGroup.namespaces += [ \"$NAMESPACE\" ]" "$MAIN_CLUSTERGROUP_FILE" 16 |         fi 17 |         ;; 18 |     "absent") 19 |         echo "Removing namespace $NAMESPACE" 20 |         yq -i "del(.clusterGroup.namespaces[] | select(. == \"$NAMESPACE\"))" "$MAIN_CLUSTERGROUP_FILE" 21 |         ;; 22 |     *) 23 |         echo "$STATE not supported" 24 |         exit 1 25 |         ;; 26 | esac 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /common/scripts/pattern-util.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function is_available { 4 |   command -v $1 >/dev/null 2>&1 || { echo >&2 "$1 is required but it's not installed. Aborting."; exit 1; } 5 | } 6 | 7 | function version { 8 |   echo "$@" | awk -F.
9 | }
10 |
11 | if [ -z "$PATTERN_UTILITY_CONTAINER" ]; then
12 | PATTERN_UTILITY_CONTAINER="quay.io/hybridcloudpatterns/utility-container"
13 | fi
14 | # If PATTERN_DISCONNECTED_HOME is set it will be used to populate both PATTERN_UTILITY_CONTAINER
15 | # and PATTERN_INSTALL_CHART automatically
16 | if [ -n "${PATTERN_DISCONNECTED_HOME}" ]; then
17 | PATTERN_UTILITY_CONTAINER="${PATTERN_DISCONNECTED_HOME}/utility-container"
18 | PATTERN_INSTALL_CHART="oci://${PATTERN_DISCONNECTED_HOME}/pattern-install"
19 | echo "PATTERN_DISCONNECTED_HOME is set to ${PATTERN_DISCONNECTED_HOME}"
20 | echo "Setting the following variables:"
21 | echo " PATTERN_UTILITY_CONTAINER: ${PATTERN_UTILITY_CONTAINER}"
22 | echo " PATTERN_INSTALL_CHART: ${PATTERN_INSTALL_CHART}"
23 | fi
24 |
25 | readonly commands=(podman)
26 | for cmd in "${commands[@]}"; do is_available "$cmd"; done
27 |
28 | UNSUPPORTED_PODMAN_VERSIONS="1.6 1.5"
29 | PODMAN_VERSION_STR=$(podman --version)
30 | for i in ${UNSUPPORTED_PODMAN_VERSIONS}; do
31 | # Match on a word boundary so that e.g. 11.6 is not mistaken for 1.6
32 | if echo "${PODMAN_VERSION_STR}" | grep -q -E "\b${i}"; then
33 | echo "Unsupported podman version. We recommend > 4.3.0"
34 | podman --version
35 | exit 1
36 | fi
37 | done
38 |
39 | # podman --version outputs:
40 | # podman version 4.8.2
41 | PODMAN_VERSION=$(echo "${PODMAN_VERSION_STR}" | awk '{ print $NF }')
42 |
43 | # podman < 4.3.0 does not support keep-id:uid=...
44 | if [ "$(version "${PODMAN_VERSION}")" -lt "$(version "4.3.0")" ]; then
45 | PODMAN_ARGS="-v ${HOME}:/root"
46 | else
47 | # We do not rely on bash's $UID and $GID because on macOS $GID is not set
48 | MYNAME=$(id -n -u)
49 | MYUID=$(id -u)
50 | MYGID=$(id -g)
51 | PODMAN_ARGS="--passwd-entry ${MYNAME}:x:${MYUID}:${MYGID}::/pattern-home:/bin/bash --user ${MYUID}:${MYGID} --userns keep-id:uid=${MYUID},gid=${MYGID}"
52 |
53 | fi
54 |
55 | if [ -n "$KUBECONFIG" ]; then
56 | if [[ ! "${KUBECONFIG}" =~ ^$HOME ]]; then
57 | echo "${KUBECONFIG} is pointing outside of the HOME folder, this will make it unavailable from the container."
58 | echo "Please move it somewhere inside your $HOME folder, as that is what gets bind-mounted inside the container"
59 | exit 1
60 | fi
61 | fi
62 |
63 | # Detect if we use podman machine. If we do not then we bind mount local host ssl folders
64 | # if we are using podman machine then we do not bind mount anything (for now!)
65 | REMOTE_PODMAN=$(podman system connection list -q | wc -l)
66 | if [ "$REMOTE_PODMAN" -eq 0 ]; then # If we are not using podman machine, check the host's folders
67 | # We check /etc/pki/tls because on ubuntu /etc/pki/fwupd sometimes
68 | # exists but not /etc/pki/tls and we do not want to bind mount in such a case
69 | # as it would find no certificates at all.
70 | if [ -d /etc/pki/tls ]; then
71 | PKI_HOST_MOUNT_ARGS="-v /etc/pki:/etc/pki:ro"
72 | elif [ -d /etc/ssl ]; then
73 | PKI_HOST_MOUNT_ARGS="-v /etc/ssl:/etc/ssl:ro"
74 | else
75 | PKI_HOST_MOUNT_ARGS="-v /usr/share/ca-certificates:/usr/share/ca-certificates:ro"
76 | fi
77 | else
78 | PKI_HOST_MOUNT_ARGS=""
79 | fi
80 |
81 | # Copy the kubeconfig from the current environment; the utilities fall back to ~/.kube/config, so setting KUBECONFIG is not mandatory
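# Example (hypothetical kubeconfig name): run the wrapper with an explicit
# kubeconfig that lives inside $HOME, e.g.:
#   KUBECONFIG="${HOME}/.kube/hub-config" ./pattern.sh make install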
82 | # $HOME is mounted as itself for any files that are referenced with absolute paths
83 | # With podman < 4.3.0, $HOME is mounted to /root because the UID in the container is 0 and that is where SSH looks for credentials
84 |
85 | podman run -it --rm --pull=newer \
86 | --security-opt label=disable \
87 | -e EXTRA_HELM_OPTS \
88 | -e EXTRA_PLAYBOOK_OPTS \
89 | -e TARGET_ORIGIN \
90 | -e TARGET_SITE \
91 | -e TARGET_BRANCH \
92 | -e NAME \
93 | -e TOKEN_SECRET \
94 | -e TOKEN_NAMESPACE \
95 | -e VALUES_SECRET \
96 | -e KUBECONFIG \
97 | -e PATTERN_INSTALL_CHART \
98 | -e PATTERN_DISCONNECTED_HOME \
99 | -e K8S_AUTH_HOST \
100 | -e K8S_AUTH_VERIFY_SSL \
101 | -e K8S_AUTH_SSL_CA_CERT \
102 | -e K8S_AUTH_USERNAME \
103 | -e K8S_AUTH_PASSWORD \
104 | -e K8S_AUTH_TOKEN \
105 | ${PKI_HOST_MOUNT_ARGS} \
106 | -v "${HOME}":"${HOME}" \
107 | -v "${HOME}":/pattern-home \
108 | ${PODMAN_ARGS} \
109 | ${EXTRA_ARGS} \
110 | -w "$(pwd)" \
111 | "$PATTERN_UTILITY_CONTAINER" \
112 | "$@"
113 |
--------------------------------------------------------------------------------
/common/scripts/preview-all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | REPO=$1; shift;
4 | TARGET_BRANCH=$1; shift
5 |
6 | HUB=$( yq ".main.clusterGroupName" values-global.yaml )
7 | MANAGED_CLUSTERS=$( yq ".clusterGroup.managedClusterGroups.[].name" "values-$HUB.yaml" )
8 | ALL_CLUSTERS=( $HUB $MANAGED_CLUSTERS )
9 |
10 | CLUSTER_INFO_OUT=$(oc cluster-info 2>&1)
11 | CLUSTER_INFO_RET=$?
12 | if [ $CLUSTER_INFO_RET -ne 0 ]; then
13 | echo "Could not access the cluster:"
14 | echo "${CLUSTER_INFO_OUT}"
15 | exit 1
16 | fi
17 |
18 | for cluster in "${ALL_CLUSTERS[@]}"; do
19 | # We always add clustergroup as it is the entry point and it gets special cased in preview.sh.
20 | APPS="clustergroup $( yq ".clusterGroup.applications.[].name" "values-$cluster.yaml" )"
21 | for app in $APPS; do
22 | printf '# Parsing application %s from cluster %s\n' "$app" "$cluster"
23 | common/scripts/preview.sh "$cluster" "$app" "$REPO" "$TARGET_BRANCH"
24 | done
25 | done
26 |
--------------------------------------------------------------------------------
/common/scripts/preview.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # DISCLAIMER
4 | #
5 | # - Parsing of applications needs to be more clever.
6 | # - There is currently no mechanism to preview against multiple clusters
7 | # (i.e. a hub and a remote); all previews are rendered against the current cluster.
8 | # - Make output can end up included in the YAML.
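#
# Example invocation (hypothetical repository URL); this script is normally
# driven by preview-all.sh rather than run by hand:
#   common/scripts/preview.sh hub vault https://github.com/example/mypattern.git main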
9 |
10 | SITE=$1; shift
11 | APPNAME=$1; shift
12 | GIT_REPO=$1; shift
13 | GIT_BRANCH=$1; shift
14 |
15 | if [ "${APPNAME}" != "clustergroup" ]; then
16 | # This covers the following case:
17 | # foobar:
18 | # name: foo
19 | # namespace: foo
20 | # project: foo
21 | # path: charts/all/foo
22 | # So we retrieve the actual index ("foobar") given the name attribute of the application
23 | APP=$(yq ".clusterGroup.applications | with_entries(select(.value.name == \"$APPNAME\")) | keys | .[0]" values-$SITE.yaml)
24 | isLocalHelmChart=$(yq ".clusterGroup.applications.$APP.path" values-$SITE.yaml)
25 | if [ "$isLocalHelmChart" != "null" ]; then
26 | chart="$isLocalHelmChart"
27 | else
28 | helmrepo=$(yq ".clusterGroup.applications.$APP.repoURL" values-$SITE.yaml)
29 | helmrepo="${helmrepo:+oci://quay.io/hybridcloudpatterns}"
30 | chartversion=$(yq ".clusterGroup.applications.$APP.chartVersion" values-$SITE.yaml)
31 | chartname=$(yq ".clusterGroup.applications.$APP.chart" values-$SITE.yaml)
32 | chart="${helmrepo}/${chartname} --version ${chartversion}"
33 | fi
34 | namespace=$(yq ".clusterGroup.applications.$APP.namespace" values-$SITE.yaml)
35 | else
36 | APP=$APPNAME
37 | clusterGroupChartVersion=$(yq ".main.multiSourceConfig.clusterGroupChartVersion" values-global.yaml)
38 | helmrepo="oci://quay.io/hybridcloudpatterns"
39 | chart="${helmrepo}/clustergroup --version ${clusterGroupChartVersion}"
40 | namespace="openshift-operators"
41 | fi
42 | pattern=$(yq ".global.pattern" values-global.yaml)
43 |
44 | # You can override the default lookups by using OCP_{PLATFORM,VERSION,DOMAIN}
45 | # Note that when using the utility container you need to pass in the above variables
46 | # by exporting EXTRA_ARGS="-e OCP_PLATFORM -e OCP_VERSION -e OCP_DOMAIN" before
47 | # invoking pattern-util.sh
48 | platform=${OCP_PLATFORM:-$(oc get Infrastructure.config.openshift.io/cluster -o jsonpath='{.spec.platformSpec.type}')}
49 | ocpversion=${OCP_VERSION:-$(oc get clusterversion/version -o jsonpath='{.status.desired.version}' | awk -F. '{print $1"."$2}')}
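# e.g. (hypothetical values) skip the oc lookups entirely when previewing offline:
#   OCP_PLATFORM=AWS OCP_VERSION=4.16 OCP_DOMAIN=example.com common/scripts/preview.sh hub vault <repo> <branch>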
50 | domain=${OCP_DOMAIN:-$(oc get Ingress.config.openshift.io/cluster -o jsonpath='{.spec.domain}' | sed 's/^apps.//')}
51 |
52 | function replaceGlobals() {
53 | output=$( echo $1 | sed -e 's/ //g' -e 's/\$//g' -e s@^-@@g -e s@\'@@g )
54 |
55 | output=$(echo $output | sed "s@{{.Values.global.clusterPlatform}}@${platform}@g")
56 | output=$(echo $output | sed "s@{{.Values.global.clusterVersion}}@${ocpversion}@g")
57 | output=$(echo $output | sed "s@{{.Values.global.clusterDomain}}@${domain}@g")
58 |
59 | echo $output
60 | }
61 |
62 | function getOverrides() {
63 | overrides=''
64 | overrides=$( yq ".clusterGroup.applications.$APP.overrides[]" "values-$SITE.yaml" )
65 | overrides=$( echo "$overrides" | tr -d '\n' )
66 | overrides=$( echo "$overrides" | sed -e 's/name:/ --set/g; s/value: /=/g' )
67 | if [ -n "$overrides" ]; then
68 | echo "$overrides"
69 | fi
70 | }
71 |
72 |
73 | CLUSTER_OPTS=""
74 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.pattern=$pattern"
75 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.repoURL=$GIT_REPO"
76 | CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.repoURL=$GIT_REPO"
77 | CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.revision=$GIT_BRANCH"
78 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.namespace=$namespace"
79 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.hubClusterDomain=apps.$domain"
80 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.localClusterDomain=apps.$domain"
81 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterDomain=$domain"
82 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterVersion=$ocpversion"
83 | CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterPlatform=$platform"
84 |
85 |
86 | sharedValueFiles=$(yq ".clusterGroup.sharedValueFiles" values-$SITE.yaml)
87 | appValueFiles=$(yq ".clusterGroup.applications.$APP.extraValueFiles" values-$SITE.yaml)
88 | isKustomize=$(yq ".clusterGroup.applications.$APP.kustomize" values-$SITE.yaml)
89 | OVERRIDES=$( getOverrides )
90 |
91 | VALUE_FILES="-f values-global.yaml -f values-$SITE.yaml"
92 | IFS=$'\n'
93 | for line in $sharedValueFiles; do
94 | if [ "$line" != "null" ] && [ -f "$line" ]; then
95 | file=$(replaceGlobals "$line")
96 | VALUE_FILES="$VALUE_FILES -f $PWD$file"
97 | fi
98 | done
99 |
100 | for line in $appValueFiles; do
101 | if [ "$line" != "null" ] && [ -f "$line" ]; then
102 | file=$(replaceGlobals "$line")
103 | VALUE_FILES="$VALUE_FILES -f $PWD$file"
104 | fi
105 | done
106 |
107 | if [ "$isKustomize" == "true" ]; then
108 | kustomizePath=$(yq ".clusterGroup.applications.$APP.path" values-$SITE.yaml)
109 | repoURL=$(yq ".clusterGroup.applications.$APP.repoURL" values-$SITE.yaml)
110 | if [[ $repoURL == http* ]] || [[ $repoURL == git@* ]]; then
111 | kustomizePath="${repoURL}/${kustomizePath}"
112 | fi
113 | cmd="oc kustomize ${kustomizePath}"
114 | eval "$cmd"
115 | else
116 | cmd="helm template $chart --name-template ${APP} -n ${namespace} ${VALUE_FILES} ${OVERRIDES} ${CLUSTER_OPTS}"
117 | eval "$cmd"
118 | fi
119 |
--------------------------------------------------------------------------------
/common/scripts/process-secrets.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eu
3 |
4 | get_abs_filename() {
5 | # $1 : relative filename
6 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
7 | }
8 |
9 | SCRIPT=$(get_abs_filename "$0")
10 | SCRIPTPATH=$(dirname "${SCRIPT}")
11 | COMMONPATH=$(dirname "${SCRIPTPATH}")
12 | PATTERNPATH=$(dirname "${COMMONPATH}")
13 |
14 | PATTERN_NAME=${1:-$(basename "$(pwd)")}
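# determine-secretstore-backend.sh prints the secret-store backend configured in
# values-global.yaml (e.g. "vault" or "kubernetes"; the exact set of supported
# backends depends on the framework version)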
15 | SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)"
16 |
17 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}"
18 |
19 | ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.process_secrets"
20 |
--------------------------------------------------------------------------------
/common/scripts/set-secret-backend.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | BACKEND=$1
4 |
5 | yq -i ".global.secretStore.backend = \"$BACKEND\"" values-global.yaml
6 |
--------------------------------------------------------------------------------
/common/scripts/vault-utils.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eu
3 |
4 | get_abs_filename() {
5 | # $1 : relative filename
6 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
7 | }
8 |
9 | SCRIPT=$(get_abs_filename "$0")
10 | SCRIPTPATH=$(dirname "${SCRIPT}")
11 | COMMONPATH=$(dirname "${SCRIPTPATH}")
12 | PATTERNPATH=$(dirname "${COMMONPATH}")
13 |
14 | # Parse arguments
15 | if [ $# -lt 1 ]; then
16 | echo "Specify at least the command ($#): $*"
17 | exit 1
18 | fi
19 |
20 | TASK="${1}"
21 | PATTERN_NAME=${2:-$(basename "$(pwd)")}
22 |
23 | if [ -z "${TASK}" ]; then
24 | echo "Task is unset"
25 | exit 1
26 | fi
27 |
28 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}"
29 |
30 | ansible-playbook -t "${TASK}" -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.vault"
31 |
--------------------------------------------------------------------------------
/common/scripts/write-token-kubeconfig.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eu
3 |
4 | OUTPUTFILE=${1:-"${HOME}/.kube/config"}
5 |
6 | get_abs_filename() {
7 | # $1 : relative filename
8 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")"
9 | }
10 |
11 | SCRIPT=$(get_abs_filename "$0")
12 | SCRIPTPATH=$(dirname "${SCRIPT}")
13 | COMMONPATH=$(dirname "${SCRIPTPATH}")
14 | PATTERNPATH=$(dirname "${COMMONPATH}")
15 |
16 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}"
17 |
18 | ansible-playbook -e pattern_dir="${PATTERNPATH}" -e kubeconfig_file="${OUTPUTFILE}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.write-token-kubeconfig"
19 |
--------------------------------------------------------------------------------
/overrides/values-aap-config-aeg.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | agof:
3 |   iac_repo: https://github.com/validatedpatterns-demos/ansible-edge-gitops-hmi-config-as-code.git
4 |   iac_revision: main
5 |
--------------------------------------------------------------------------------
/overrides/values-egv-vms.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | vms:
3 |   kiosk:
4 |     count: 2
5 |     role: kiosk
6 |
--------------------------------------------------------------------------------
/overrides/values-odf-chart.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | storageSystem:
3 |   deploy: true
4 |   inventory:
5 |     useSpecificNodes: false
6 |
7 | objectStorage:
8 |   enable: false
9 |
--------------------------------------------------------------------------------
/pattern-metadata.yaml:
--------------------------------------------------------------------------------
1 | # The goal of this metadata is mainly to serve as a source of truth for
2 | # documentation and QE
3 | metadata_version: "1.0"
4 | name: ansible-edge-gitops
5 | pattern_version: "1.0"
6 | display_name: Ansible Edge GitOps
7 | repo_url: https://github.com/validatedpatterns/ansible-edge-gitops
8 | docs_repo_url: https://github.com/validatedpatterns/docs
9 | issues_url: https://github.com/validatedpatterns/ansible-edge-gitops/issues
10 | docs_url: https://validatedpatterns.io/patterns/ansible-edge-gitops/
11 | ci_url: https://validatedpatterns.io/ci/?pattern=aegitops
12 | # can be sandbox, tested or maintained
13 | tier: maintained
14 | owners: mhjacks
15 | requirements:
16 |   hub: # Main cluster
17 |     compute:
18 |       platform:
19 |         aws:
20 |           replicas: 3
21 |           type: m5.4xlarge
22 |     controlPlane:
23 |       platform:
24 |         aws:
25 |           replicas: 3
26 |           type: m5.4xlarge
27 |
28 | # Loosely defined extra features like hypershift support, non-openshift
29 | # kubernetes support, spoke support
30 | extra_features:
31 |   hypershift_support: false
32 |   spoke_support: false
33 |
34 | external_requirements:
35 | # e.g. external quay, S3 bucket, AGOF tokens to access paywalled material, manifests, RAG-LLM hardware (only selected regions)
36 |
--------------------------------------------------------------------------------
/pattern.sh:
--------------------------------------------------------------------------------
1 | ./common/scripts/pattern-util.sh "$@"
--------------------------------------------------------------------------------
/scripts/ansible_get_credentials.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ansible-playbook
2 | ---
3 | - name: Retrieve AAP credentials
4 |   ansible.builtin.import_playbook: ../ansible/ansible_get_credentials.yml
5 |
--------------------------------------------------------------------------------
/scripts/ansible_load_controller.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ansible-playbook
2 | ---
3 | - name: Retrieve AAP credentials
4 |   ansible.builtin.import_playbook: ../ansible/ansible_get_credentials.yml
5 |
6 | - name: Parse secrets from local values_secret.yaml file
7 |   ansible.builtin.import_playbook: ../ansible/parse_secrets_from_values_secret.yml
8 |
9 | - name: Configure AAP instance
10 |   ansible.builtin.import_playbook: ../ansible/ansible_configure_controller.yml
11 |
--------------------------------------------------------------------------------
/scripts/check_kubevirt_worker.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ansible-playbook
2 | ---
3 | - name: Check on KubeVirt Worker Status
4 |   ansible.builtin.import_playbook: ../ansible/check_kubevirt_worker.yml
5 |
--------------------------------------------------------------------------------
/scripts/deploy_kubevirt_worker.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ansible-playbook
2 | ---
3 | - name: Deploy KubeVirt Worker
4 |   ansible.builtin.import_playbook: ../ansible/deploy_kubevirt_worker.yml
5 |
--------------------------------------------------------------------------------
/scripts/get_image_urls.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ansible-playbook
2 | ---
3 | - name: "Retrieve RHEL image(s)"
4 |   become: false
5 |   connection: local
6 |   hosts: localhost
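  # Assumption, noted for illustration: REFRESH_TOKEN_FILE should name a file
  # containing a Red Hat API offline token (generated at
  # https://access.redhat.com/management/api); the tasks below exchange it for a
  # short-lived access token via sso.redhat.com.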
7 |   gather_facts: false
8 |   vars:
9 |     kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}"
10 |     refresh_token_file: "{{ lookup('env', 'REFRESH_TOKEN_FILE') }}"
11 |     refresh_token_contents: "{{ lookup('file', refresh_token_file) }}"
12 |     redhat_sso_url: 'https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token'
13 |     redhat_api_url: https://api.access.redhat.com/management/v1
14 |     image_checksums:
15 |       # rhel-8.5-x86_64-kvm.qcow2
16 |       #- "9b63267716fa557f76df4899fb6a591c4c8a6ae2828f6297458815bff55ce8cc"
17 |       # rhel-8.5-x86_64-boot.iso
18 |       #- "61fe463758f6ee9b21c4d6698671980829ca4f747a066d556fa0e5eefc45382c"
19 |       # rhel-8.6-x86_64-kvm.qcow2
20 |       - "c9b32bef88d605d754b932aad0140e1955ab9446818c70c4c36ca75d6f442fe9"
21 |       # rhel-8.6-x86_64-boot.iso
22 |       - "4a3ffcec86ba40c89fc2608c8e3bb00b71d572da219f30904536cdce80b58e76"
23 |     initial_download_path: /tmp
24 |   tasks:
25 |     - name: "Debug vars"
26 |       ansible.builtin.debug:
27 |         msg: '{{ refresh_token_file }} {{ refresh_token_contents }}'
28 |
29 |     - name: Generate Access Token
30 |       ansible.builtin.uri:
31 |         body:
32 |           client_id: rhsm-api
33 |           grant_type: refresh_token
34 |           refresh_token: "{{ refresh_token_contents }}"
35 |         body_format: form-urlencoded
36 |         method: POST
37 |         url: "{{ redhat_sso_url }}"
38 |       register: access_token
39 |
40 |     - name: Generate Image Download URLs
41 |       ansible.builtin.uri:
42 |         follow_redirects: none
43 |         headers:
44 |           Authorization: "Bearer {{ access_token.json.access_token }}"
45 |         status_code: 307
46 |         url: "{{ redhat_api_url }}/images/{{ item }}/download"
47 |       register: image_urls
48 |       loop: "{{ image_checksums }}"
49 |
50 |     - name: Download Red Hat Images
51 |       ansible.builtin.get_url:
52 |         checksum: "sha256:{{ item.item }}"
53 |         dest: "{{ initial_download_path }}/{{ item.json.body.filename }}"
54 |         url: "{{ item.json.body.href }}"
55 |       loop: "{{ image_urls.results }}"
56 |
57 |     - name: Get route for upload proxy
58 |       kubernetes.core.k8s_info:
59 |         kind: Route
60 |         namespace: openshift-cnv
61 |         name: cdi-uploadproxy
62 |       register: uploadproxy_route
63 |
64 |     - name: "Set host variable"
65 |       ansible.builtin.set_fact:
66 |         uploadproxy_url: 'https://{{ uploadproxy_route.resources[0].spec.host }}'
67 |
68 |     - name: "Debug host variable"
69 |       ansible.builtin.debug:
70 |         msg: '{{ uploadproxy_url }}'
71 |
72 |     - name: Upload images to CDI proxy
73 |       community.kubevirt.kubevirt_cdi_upload:
74 |         pvc_namespace: default
75 |         pvc_name: 'pvc-{{ item.json.body.filename }}'
76 |         upload_host_validate_certs: false
77 |         upload_host: '{{ uploadproxy_url }}'
78 |         dest: "{{ initial_download_path }}/{{ item.json.body.filename }}"
79 |       loop: "{{ image_urls.results }}"
80 |
--------------------------------------------------------------------------------
/scripts/update-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -x
2 |
3 | TEST_OPTS=$(echo -f common/examples/values-secret.yaml -f values-global.yaml --set global.repoURL="https://github.com/pattern-clone/mypattern" \
4 | --set main.git.repoURL="https://github.com/pattern-clone/mypattern" --set main.git.revision=main --set global.pattern="mypattern" \
5 | --set global.namespace="pattern-namespace" --set global.hubClusterDomain=hub.example.com --set global.localClusterDomain=region.example.com \
6 | --set "clusterGroup.imperative.jobs[0].name"="test" --set "clusterGroup.imperative.jobs[0].playbook"="ansible/test.yml" \
7 | --set clusterGroup.insecureUnsealVaultInsideCluster=true)
8 |
9 | echo "$TEST_OPTS"
10 |
11 | rm -f tests/*
12 |
13 | for i in $(find . -type f -iname 'Chart.yaml' -not -path "./common/*" -exec dirname "{}" \; | sed -e 's/.\///'); do \
14 | s=$(echo $i | sed -e s@/@-@g -e s@charts-@@); echo $s; helm template $i --name-template $s > tests/$s-naked.expected.yaml; done
15 |
16 | for i in $(find . -type f -iname 'Chart.yaml' -not -path "./common/*" -exec dirname "{}" \; | sed -e 's/.\///'); do \
17 | s=$(echo $i | sed -e s@/@-@g -e s@charts-@@); echo $s; helm template $i --name-template $s $TEST_OPTS > tests/$s-normal.expected.yaml; done
18 |
--------------------------------------------------------------------------------
/tests/interop/README.md:
--------------------------------------------------------------------------------
1 | # Running tests
2 |
3 | ## Prerequisites
4 |
5 | * OpenShift cluster with ansible-edge-gitops pattern installed
6 | * kubeconfig file for the OpenShift cluster
7 | * oc client installed at ~/oc_client/oc
8 |
9 | ## Steps
10 |
11 | * create python3 venv, clone ansible-edge-gitops repository
12 | * export KUBECONFIG=\<path to kubeconfig file\>
13 | * export INFRA_PROVIDER=\<infrastructure provider, e.g. aws\>
14 | * (optional) export WORKSPACE=\<directory for results\> (defaults to /tmp)
15 | * cd ansible-edge-gitops/tests/interop
16 | * pip install -r requirements.txt
17 | * ./run_tests.sh
18 |
19 | ## Results
20 |
21 | * results .xml files will be placed at $WORKSPACE
22 | * test logs will be placed at $WORKSPACE/.results/test_execution_logs/
23 | * CI badge file will be placed at $WORKSPACE
24 |
--------------------------------------------------------------------------------
/tests/interop/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.1.0"
2 | __loggername__ = "css_logger"
3 |
--------------------------------------------------------------------------------
/tests/interop/conftest.py:
--------------------------------------------------------------------------------
1 | from validatedpatterns_tests.interop.conftest_logger import *  # noqa: F401, F403
2 | from validatedpatterns_tests.interop.conftest_openshift import *  # noqa: F401, F403
3 |
--------------------------------------------------------------------------------
/tests/interop/create_ci_badge.py:
--------------------------------------------------------------------------------
1 | import json
2 | import os
3 | import subprocess
4 | from datetime import datetime
5 |
6 | from junitparser import JUnitXml
7 |
8 | oc = os.environ["HOME"] + "/oc_client/oc"
9 |
10 | ci_badge = {
11 |     "schemaVersion": 1,
12 |     "label": "Community test",
13 |     "message": "",
14 |     "color": "red",
15 |     "openshiftVersion": "",
16 |     "infraProvider": os.environ.get("INFRA_PROVIDER"),
17 |     "patternName": os.environ.get("PATTERN_NAME"),
18 |     "patternRepo": "",
19 |     "patternBranch": "",
20 |     "date": datetime.today().strftime("%Y-%m-%d"),
21 |     "testSource": "Community",
22 |     "debugInfo": None,
23 | }
24 |
25 |
26 | def get_openshift_version():
27 |     try:
28 |         version_ret = subprocess.run([oc, "version", "-o", "json"], capture_output=True)
29 |         version_out = version_ret.stdout.decode("utf-8")
30 |         openshift_version = json.loads(version_out)["openshiftVersion"]
31 |         major_minor = ".".join(openshift_version.split(".")[:-1])
32 |         return openshift_version, major_minor
33 |     except KeyError as e:
34 |         print("KeyError:" + str(e))
35 |         raise SystemExit("could not determine OpenShift version")
36 |
37 |
38 | if __name__ == "__main__":
39 |     versions = get_openshift_version()
40 |     ci_badge["openshiftVersion"] = versions[0]
41 |
42 |     pattern_repo = subprocess.run(
43 |         ["git", "config", "--get", "remote.origin.url"], capture_output=True, text=True
44 |     )
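    # These git lookups (origin URL above, current branch below) assume the
    # script is run from inside the pattern's git checkout.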
45 |     pattern_branch = subprocess.run(
46 |         ["git", "branch", "--show-current"], capture_output=True, text=True
47 |     )
48 |
49 |     ci_badge["patternRepo"] = pattern_repo.stdout.strip()
50 |     ci_badge["patternBranch"] = pattern_branch.stdout.strip()
51 |
52 |     # Check each xml file for failures
53 |     results_dir = os.environ.get("WORKSPACE")
54 |     failures = 0
55 |
56 |     for file in os.listdir(results_dir):
57 |         if file.startswith("test_") and file.endswith(".xml"):
58 |             with open(os.path.join(results_dir, file), "r") as result_file:  # type: ignore
59 |                 xml = JUnitXml.fromfile(result_file)  # type: ignore
60 |                 for suite in xml:
61 |                     for case in suite:
62 |                         if case.result:
63 |                             failures += 1
64 |
65 |     # Determine badge color from results
66 |     if failures == 0:
67 |         ci_badge["color"] = "green"
68 |
69 |     # For now we assume `message` is the same as patternBranch
70 |     ci_badge["message"] = ci_badge["patternBranch"]
71 |
72 |     ci_badge_json_basename = (
73 |         os.environ.get("PATTERN_SHORTNAME")  # type: ignore
74 |         + "-"
75 |         + os.environ.get("INFRA_PROVIDER")
76 |         + "-"
77 |         + versions[1]
78 |         + "-stable-badge.json"
79 |     )
80 |     ci_badge_json_filename = os.path.join(results_dir, ci_badge_json_basename)  # type: ignore
81 |     print(f"Creating CI badge file at: {ci_badge_json_filename}")
82 |
83 |     with open(ci_badge_json_filename, "w") as ci_badge_file:
84 |         json.dump(ci_badge, ci_badge_file)
85 |
--------------------------------------------------------------------------------
/tests/interop/requirements.txt:
--------------------------------------------------------------------------------
1 | pytest
2 | kubernetes
3 | openshift
4 | openshift-python-wrapper
5 | junitparser
6 | git+https://github.com/validatedpatterns/vp-qe-test-common.git@development#egg=vp-qe-test-common
--------------------------------------------------------------------------------
/tests/interop/run_tests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/bash
2 |
3 | export EXTERNAL_TEST="true"
4 | export PATTERN_NAME="AnsibleEdgeGitops"
5 | export PATTERN_SHORTNAME="aegitops"
6 |
7 | if [ -z "${KUBECONFIG}" ]; then
8 | echo "No kubeconfig file set for hub cluster"
9 | exit 1
10 | fi
11 |
12 | if [ -z "${INFRA_PROVIDER}" ]; then
13 | echo "INFRA_PROVIDER is not defined"
14 | exit 1
15 | fi
16 |
17 | if [ -z "${WORKSPACE}" ]; then
18 | export WORKSPACE=/tmp
19 | fi
20 |
21 | pytest -lv --disable-warnings test_subscription_status_hub.py --kubeconfig "$KUBECONFIG" --junit-xml "$WORKSPACE/test_subscription_status_hub.xml"
22 |
23 | pytest -lv --disable-warnings test_check_vm_status.py --kubeconfig "$KUBECONFIG" --junit-xml "$WORKSPACE/test_check_vm_status.xml"
24 |
25 | pytest -lv --disable-warnings test_validate_hub_site_components.py --kubeconfig "$KUBECONFIG" --junit-xml "$WORKSPACE/test_validate_hub_site_components.xml"
26 |
27 | python3 create_ci_badge.py
28 |
--------------------------------------------------------------------------------
/tests/interop/test_check_vm_status.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import time
4 |
5 | import pytest
6 | from validatedpatterns_tests.interop.crd import ArgoCD
7 |
8 | from . 
import __loggername__ 9 | 10 | logger = logging.getLogger(__loggername__) 11 | 12 | oc = os.environ["HOME"] + "/oc_client/oc" 13 | 14 | 15 | @pytest.mark.check_vm_status 16 | def test_check_vm_status(openshift_dyn_client): 17 | logger.info("Get status for 'edge-gitops-vms' application") 18 | timeout = time.time() + 60 * 30 19 | while time.time() < timeout: 20 | app = ArgoCD.get( 21 | dyn_client=openshift_dyn_client, 22 | namespace="ansible-edge-gitops-hub", 23 | name="edge-gitops-vms", 24 | ) 25 | app = next(app) 26 | app_name = app.instance.metadata.name 27 | app_health = app.instance.status.health.status 28 | app_sync = app.instance.status.sync.status 29 | 30 | logger.info(f"Status for {app_name} : {app_health} : {app_sync}") 31 | 32 | if app_health == "Healthy" and app_sync == "Synced": 33 | failed = False 34 | break 35 | else: 36 | logger.info(f"Waiting for {app_name} app to sync") 37 | time.sleep(30) 38 | failed = True 39 | 40 | if failed: 41 | logger.info(app) 42 | err_msg = "Some or all applications deployed on hub site are unhealthy" 43 | logger.error(f"FAIL: {err_msg}: {app_name}") 44 | assert False, err_msg 45 | else: 46 | logger.info("PASS: All applications deployed on hub site are healthy.") 47 | -------------------------------------------------------------------------------- /tests/interop/test_subscription_status_hub.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import pytest 4 | from validatedpatterns_tests.interop import subscription 5 | 6 | from . import __loggername__ 7 | 8 | logger = logging.getLogger(__loggername__) 9 | 10 | 11 | @pytest.mark.subscription_status_hub 12 | def test_subscription_status_hub(openshift_dyn_client): 13 | # These are the operator subscriptions and their associated namespaces 14 | expected_subs = { 15 | "openshift-gitops-operator": ["openshift-operators"], 16 | "patterns-operator": ["openshift-operators"], 17 | "odf-operator": ["openshift-storage"], 18 | "kubevirt-hyperconverged": ["openshift-cnv"], 19 | "ansible-automation-platform-operator": ["ansible-automation-platform"], 20 | } 21 | 22 | err_msg = subscription.subscription_status( 23 | openshift_dyn_client, expected_subs, diff=True 24 | ) 25 | if err_msg: 26 | logger.error(f"FAIL: {err_msg}") 27 | assert False, err_msg 28 | else: 29 | logger.info("PASS: Subscription status check passed") 30 | -------------------------------------------------------------------------------- /tests/interop/test_validate_hub_site_components.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | import pytest 5 | from ocp_resources.storage_class import StorageClass 6 | from validatedpatterns_tests.interop import application, components 7 | 8 | from . 
import __loggername__ 9 | 10 | logger = logging.getLogger(__loggername__) 11 | 12 | oc = os.environ["HOME"] + "/oc_client/oc" 13 | 14 | 15 | @pytest.mark.test_validate_hub_site_components 16 | def test_validate_hub_site_components(openshift_dyn_client): 17 | logger.info("Checking Openshift version on hub site") 18 | version_out = components.dump_openshift_version() 19 | logger.info(f"Openshift version:\n{version_out}") 20 | 21 | logger.info("Dump PVC and storageclass info") 22 | pvcs_out = components.dump_pvc() 23 | logger.info(f"PVCs:\n{pvcs_out}") 24 | 25 | for sc in StorageClass.get(dyn_client=openshift_dyn_client): 26 | logger.info(sc.instance) 27 | 28 | 29 | @pytest.mark.validate_hub_site_reachable 30 | def test_validate_hub_site_reachable(kube_config, openshift_dyn_client): 31 | logger.info("Check if hub site API end point is reachable") 32 | err_msg = components.validate_site_reachable(kube_config, openshift_dyn_client) 33 | if err_msg: 34 | logger.error(f"FAIL: {err_msg}") 35 | assert False, err_msg 36 | else: 37 | logger.info("PASS: Hub site is reachable") 38 | 39 | 40 | @pytest.mark.check_pod_status_hub 41 | def test_check_pod_status(openshift_dyn_client): 42 | logger.info("Checking pod status") 43 | projects = [ 44 | "openshift-operators", 45 | "ansible-automation-platform", 46 | "ansible-edge-gitops-hub", 47 | "openshift-gitops", 48 | "edge-gitops-vms", 49 | "vault", 50 | ] 51 | err_msg = components.check_pod_status(openshift_dyn_client, projects) 52 | if err_msg: 53 | logger.error(f"FAIL: {err_msg}") 54 | assert False, err_msg 55 | else: 56 | logger.info("PASS: Pod status check succeeded.") 57 | 58 | 59 | @pytest.mark.validate_argocd_reachable_hub_site 60 | def test_validate_argocd_reachable_hub_site(openshift_dyn_client): 61 | logger.info("Check if argocd route/url on hub site is reachable") 62 | err_msg = components.validate_argocd_reachable(openshift_dyn_client) 63 | if err_msg: 64 | logger.error(f"FAIL: {err_msg}") 65 | assert False, err_msg 66 | else: 67 | logger.info("PASS: Argocd is reachable") 68 | 69 | 70 | @pytest.mark.validate_argocd_applications_health_hub_site 71 | def test_validate_argocd_applications_health_hub_site(openshift_dyn_client): 72 | logger.info("Get all applications deployed by argocd on hub site") 73 | projects = ["openshift-gitops", "ansible-edge-gitops-hub"] 74 | unhealthy_apps = application.get_argocd_application_status( 75 | openshift_dyn_client, projects 76 | ) 77 | if unhealthy_apps: 78 | err_msg = "Some or all applications deployed on hub site are unhealthy" 79 | logger.error(f"FAIL: {err_msg}:\n{unhealthy_apps}") 80 | assert False, err_msg 81 | else: 82 | logger.info("PASS: All applications deployed on hub site are healthy.") 83 | -------------------------------------------------------------------------------- /values-global.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | global: 3 | pattern: ansible-edge-gitops 4 | 5 | options: 6 | useCSV: false 7 | syncPolicy: Automatic 8 | installPlanApproval: Automatic 9 | 10 | hub: 11 | provider: aws 12 | 13 | main: 14 | clusterGroupName: hub 15 | multiSourceConfig: 16 | enabled: true 17 | clusterGroupChartVersion: 0.9.* 18 | -------------------------------------------------------------------------------- /values-hub.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | clusterGroup: 3 | name: hub 4 | isHubCluster: true 5 | 6 | namespaces: 7 | - vault 8 | - golang-external-secrets 9 | - 
ansible-automation-platform 10 | - openshift-cnv 11 | - openshift-storage 12 | - edge-gitops-vms 13 | - aap-config 14 | 15 | subscriptions: 16 | aap-operator: 17 | name: ansible-automation-platform-operator 18 | namespace: ansible-automation-platform 19 | channel: stable-2.5 20 | 21 | openshift-virtualization: 22 | name: kubevirt-hyperconverged 23 | namespace: openshift-cnv 24 | channel: stable 25 | 26 | openshift-data-foundation: 27 | name: odf-operator 28 | namespace: openshift-storage 29 | 30 | projects: 31 | - hub 32 | 33 | imperative: 34 | jobs: 35 | - name: deploy-kubevirt-worker 36 | playbook: ansible/deploy_kubevirt_worker.yml 37 | verbosity: -vvv 38 | - name: clean-golden-images 39 | playbook: ansible/odf_fix_dataimportcrons.yml 40 | verbosity: -vvv 41 | clusterRoleYaml: 42 | - apiGroups: 43 | - "*" 44 | resources: 45 | - machinesets 46 | - persistentvolumeclaims 47 | - datavolumes 48 | - dataimportcrons 49 | - datasources 50 | verbs: 51 | - "*" 52 | - apiGroups: 53 | - "*" 54 | resources: 55 | - "*" 56 | verbs: 57 | - get 58 | - list 59 | - watch 60 | 61 | applications: 62 | aap: 63 | name: ansible-automation-platform 64 | namespace: ansible-automation-platform 65 | project: hub 66 | chart: ansible-automation-platform-instance 67 | chartVersion: 0.1.* 68 | 69 | aap-config: 70 | name: aap-config 71 | namespace: aap-config 72 | project: hub 73 | chart: aap-config 74 | chartVersion: 0.1.* 75 | extraValueFiles: 76 | - '$patternref/overrides/values-aap-config-aeg.yaml' 77 | 78 | vault: 79 | name: vault 80 | namespace: vault 81 | project: hub 82 | chart: hashicorp-vault 83 | chartVersion: 0.1.* 84 | 85 | golang-external-secrets: 86 | name: golang-external-secrets 87 | namespace: golang-external-secrets 88 | project: hub 89 | chart: golang-external-secrets 90 | chartVersion: 0.1.* 91 | 92 | openshift-cnv: 93 | name: openshift-cnv 94 | namespace: openshift-cnv 95 | project: hub 96 | chart: openshift-virtualization-instance 97 | chartVersion: 0.1.* 98 | 99 | odf: 100 | name: odf 101 | namespace: openshift-storage 102 | project: hub 103 | chart: openshift-data-foundations 104 | chartVersion: 0.2.* 105 | extraValueFiles: 106 | - '$patternref/overrides/values-odf-chart.yaml' 107 | 108 | edge-gitops-vms: 109 | name: edge-gitops-vms 110 | namespace: edge-gitops-vms 111 | project: hub 112 | chart: edge-gitops-vms 113 | chartVersion: 0.2.* 114 | extraValueFiles: 115 | - '$patternref/overrides/values-egv-vms.yaml' 116 | 117 | # Only the hub cluster here - managed entities are edge nodes 118 | managedClusterGroups: [] 119 | -------------------------------------------------------------------------------- /values-secret.yaml.template: -------------------------------------------------------------------------------- 1 | --- 2 | # NEVER COMMIT THESE VALUES TO GIT 3 | version: "2.0" 4 | secrets: 5 | - name: vm-ssh 6 | fields: 7 | - name: username 8 | value: 'Username of user to attach privatekey and publickey to - cloud-user is a typical value' 9 | 10 | - name: privatekey 11 | value: 'Private ssh key of the user who will be able to elevate to root to provision kiosks' 12 | 13 | - name: publickey 14 | value: 'Public ssh key of the user who will be able to elevate to root to provision kiosks' 15 | 16 | - name: rhsm 17 | fields: 18 | - name: username 19 | value: 'username of user to register RHEL VMs' 20 | - name: password 21 | value: 'password of rhsm user in plaintext' 22 | 23 | - name: kiosk-extra 24 | fields: 25 | # Default: '--privileged -e GATEWAY_ADMIN_PASSWORD=redhat' 26 | - name: 
container_extra_params 27 | value: "Optional extra params to pass to kiosk ignition container, including admin password" 28 | 29 | - name: cloud-init 30 | fields: 31 | - name: userData 32 | value: |- 33 | #cloud-config 34 | user: 'username of user for console, probably cloud-user' 35 | password: 'a suitable password to use on the console' 36 | chpasswd: { expire: False } 37 | 38 | - name: aap-manifest 39 | fields: 40 | - name: b64content 41 | path: 'full pathname of file containing Satellite Manifest for entitling Ansible Automation Platform' 42 | base64: true 43 | 44 | - name: automation-hub-token 45 | fields: 46 | - name: token 47 | value: 'An automation hub token for retrieving Certified and Validated Ansible content' 48 | 49 | - name: agof-vault-file 50 | fields: 51 | - name: agof-vault-file 52 | path: 'full pathname of a valid agof_vault file for secrets to overlay the iac config' 53 | base64: true 54 | --------------------------------------------------------------------------------