├── charts ├── hub │ └── trustee │ │ ├── README.md │ │ ├── templates │ │ ├── resource-policy.yaml │ │ ├── kbs-route.yaml │ │ ├── reference-values.yaml │ │ ├── kbsres1-eso.yaml │ │ ├── tls-key-eso.yaml │ │ ├── kbs-passphrase-eso.yaml │ │ ├── kbs-operator-keys.yaml │ │ ├── securityPolicy-eso.yaml │ │ ├── tls-cert-eso.yaml │ │ ├── kbs.yaml │ │ ├── push-secret.yaml │ │ └── kbs-config-map.yaml │ │ ├── Chart.yaml │ │ └── values.yaml ├── coco-supported │ ├── kbs-access │ │ ├── values.yaml │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── environment.yaml │ │ │ ├── secure-route.yaml │ │ │ ├── secure-svc.yaml │ │ │ └── secure-pod.yaml │ │ └── README.md │ ├── hello-openshift │ │ ├── values.yaml │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── secure-route.yaml │ │ │ ├── standard-route.yaml │ │ │ ├── insecure-policy-route.yaml │ │ │ ├── secure-svc.yaml │ │ │ ├── standard-svc.yaml │ │ │ ├── insecure-policy-svc.yaml │ │ │ ├── standard-pod.yaml │ │ │ ├── secure-pod.yaml │ │ │ └── insecure-policy-pod.yaml │ │ └── insecure-policy.rego │ └── sandbox │ │ ├── Chart.yaml │ │ ├── templates │ │ ├── feature-gate.yaml │ │ ├── kata-config.yaml │ │ ├── ssh-key-eso.yaml │ │ └── peer-pods-cm.yaml │ │ └── values.yaml └── all │ └── letsencrypt │ ├── templates │ ├── cert-manager-installation.yaml │ ├── wildcard-cert.yaml │ ├── credentials-request.yaml │ ├── api-cert.yaml │ ├── issuer.yaml │ ├── default-routes.yaml │ ├── acm-secret-create.yaml │ └── issuer-acm.yaml │ ├── Chart.yaml │ ├── values.yaml │ ├── README.md │ └── LICENSE ├── pattern.sh ├── .gitleaks.toml ├── rhdp ├── requirements.txt ├── README.md ├── install-config.yaml.j2 ├── wrapper.sh └── rhdp-cluster-define.py ├── common ├── .gitleaks.toml ├── .github │ ├── linters │ │ ├── .markdown-lint.yml │ │ └── .gitleaks.toml │ ├── dependabot.yml │ └── workflows │ │ ├── pattern-sh-ci.yml │ │ └── superlinter.yml ├── scripts │ ├── set-secret-backend.sh │ ├── determine-pattern-name.sh │ ├── determine-secretstore-backend.sh │ ├── 
determine-main-clustergroup.sh │ ├── load-k8s-secrets.sh │ ├── write-token-kubeconfig.sh │ ├── deploy-pattern.sh │ ├── process-secrets.sh │ ├── vault-utils.sh │ ├── manage-secret-namespace.sh │ ├── preview-all.sh │ ├── display-secrets-info.sh │ ├── argocd-login.sh │ ├── manage-secret-app.sh │ ├── make-common-subtree.sh │ ├── pattern-util.sh │ └── preview.sh ├── requirements.yml ├── .gitignore ├── .ansible-lint ├── README.md ├── Changes.md ├── LICENSE └── Makefile ├── .flake8 ├── .prettierrc ├── commitlint.config.js ├── .github ├── linters │ ├── .markdown-lint.yml │ └── .gitleaks.toml ├── dependabot.yml └── workflows │ ├── ansible-lint.yml │ ├── conventional-pr.yml │ ├── release.yml │ ├── superlinter.yml │ └── jsonschema.yaml ├── .releaserc.yaml ├── requirements.yml ├── .ansible-lint ├── package.json ├── .gitignore ├── ansible.cfg ├── overrides ├── values-IBMCloud.yaml ├── values-AWS.yaml └── values-Azure.yaml ├── ansible ├── site.yaml ├── initdata-default.toml.tpl ├── install-deps.yaml ├── configure-issuer.yaml ├── init-data-gzipper.yaml ├── azure-nat-gateway.yaml └── gen-certificate.yaml ├── Makefile ├── values-global.yaml ├── scripts └── gen-secrets.sh ├── values-secret.yaml.template ├── values-simple.yaml ├── README.md └── LICENSE /charts/hub/trustee/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /pattern.sh: -------------------------------------------------------------------------------- 1 | ./common/scripts/pattern-util.sh -------------------------------------------------------------------------------- /.gitleaks.toml: -------------------------------------------------------------------------------- 1 | .github/linters/.gitleaks.toml -------------------------------------------------------------------------------- /rhdp/requirements.txt: -------------------------------------------------------------------------------- 1 | typer 2 | 
rich 3 | Jinja2 -------------------------------------------------------------------------------- /common/.gitleaks.toml: -------------------------------------------------------------------------------- 1 | .github/linters/.gitleaks.toml -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | # match black default 3 | max-line-length = 88 -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "semi": false 4 | } -------------------------------------------------------------------------------- /commitlint.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { extends: ['@commitlint/config-conventional'] } 2 | -------------------------------------------------------------------------------- /charts/coco-supported/kbs-access/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | coco: 3 | runtimeClassName: kata-remote 4 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | coco: 3 | runtimeClassName: kata-remote 4 | -------------------------------------------------------------------------------- /.github/linters/.markdown-lint.yml: -------------------------------------------------------------------------------- 1 | { 2 | "default": true, 3 | "MD003": false, 4 | "MD013": false, 5 | "MD033": false 6 | } -------------------------------------------------------------------------------- /common/.github/linters/.markdown-lint.yml: 
-------------------------------------------------------------------------------- 1 | { 2 | "default": true, 3 | "MD003": false, 4 | "MD013": false, 5 | "MD033": false 6 | } -------------------------------------------------------------------------------- /common/scripts/set-secret-backend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | BACKEND=$1 4 | 5 | yq -i ".global.secretStore.backend = \"$BACKEND\"" values-global.yaml 6 | -------------------------------------------------------------------------------- /common/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Define Ansible collection requirements here 3 | collections: 4 | - name: git+https://github.com/validatedpatterns/rhvp.cluster_utils.git,v1 5 | -------------------------------------------------------------------------------- /charts/coco-supported/kbs-access/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: Demonstrates accessing keys within the KBS. 3 | keywords: 4 | - pattern 5 | name: kbs-access 6 | version: 0.0.1 7 | -------------------------------------------------------------------------------- /charts/coco-supported/kbs-access/templates/environment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kbsref 5 | namespace: kbs-access 6 | data: 7 | FILEPATH: "/output/kbsres1.txt" -------------------------------------------------------------------------------- /common/.github/linters/.gitleaks.toml: -------------------------------------------------------------------------------- 1 | [whitelist] 2 | # As of v4, gitleaks only matches against filename, not path in the 3 | # files directive. Leaving content for backwards compatibility. 
4 | files = [ ] 5 | -------------------------------------------------------------------------------- /.releaserc.yaml: -------------------------------------------------------------------------------- 1 | branches: 2 | - main 3 | plugins: 4 | - "@semantic-release/commit-analyzer" 5 | - "@semantic-release/release-notes-generator" 6 | - "@semantic-release/github" 7 | - "@semantic-release/git" 8 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | collections: 2 | - azure.azcollection 3 | # Modules installed by default in the utility container, required for linting 4 | - community.general 5 | - community.crypto 6 | - kubernetes.core 7 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | # Check for updates to GitHub Actions every week 5 | - package-ecosystem: "github-actions" 6 | directory: "/" 7 | schedule: 8 | interval: "weekly" 9 | 10 | -------------------------------------------------------------------------------- /charts/coco-supported/sandbox/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: A Helm chart to deploy sandbox containers and uses upstream where required. 
3 | keywords: 4 | - pattern 5 | - upstream 6 | - sandbox 7 | name: sandbox 8 | version: 0.0.1 9 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/resource-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: resource-policy 5 | namespace: {{ .Values.kbs.deployNS }} 6 | data: 7 | policy.rego: | 8 | package policy 9 | default allow = true -------------------------------------------------------------------------------- /common/.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | # Check for updates to GitHub Actions every week 5 | - package-ecosystem: "github-actions" 6 | directory: "/" 7 | schedule: 8 | interval: "weekly" 9 | 10 | -------------------------------------------------------------------------------- /charts/hub/trustee/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: Deploy and configure trustee on the hub cluster. If upstream operatorhub's catalog source is configured to pull trustee down. 
3 | keywords: 4 | - pattern 5 | name: trustee 6 | version: 0.0.1 7 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: Deploys a 'hello openshift' pod 3 times, twice with different coco configurations and once as a standard pod 3 | keywords: 4 | - pattern 5 | name: hello-openshift 6 | version: 0.0.1 7 | -------------------------------------------------------------------------------- /common/.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__/ 2 | *.py[cod] 3 | *~ 4 | *.swp 5 | *.swo 6 | values-secret.yaml 7 | .*.expected.yaml 8 | .vscode 9 | pattern-vault.init 10 | pattern-vault.init.bak 11 | super-linter.log 12 | golang-external-secrets/Chart.lock 13 | hashicorp-vault/Chart.lock 14 | -------------------------------------------------------------------------------- /charts/coco-supported/kbs-access/templates/secure-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: secure 5 | spec: 6 | port: 7 | targetPort: 5000 8 | to: 9 | kind: Service 10 | name: secure 11 | weight: 100 12 | wildcardPolicy: None 13 | -------------------------------------------------------------------------------- /charts/coco-supported/sandbox/templates/feature-gate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | annotations: 5 | argocd.argoproj.io/sync-wave: "1" 6 | name: osc-feature-gates 7 | namespace: openshift-sandboxed-containers-operator 8 | data: 9 | confidential: "true" 10 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/secure-route.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: secure 5 | spec: 6 | port: 7 | targetPort: 8888 8 | to: 9 | kind: Service 10 | name: secure 11 | weight: 100 12 | wildcardPolicy: None 13 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/standard-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: standard 5 | spec: 6 | port: 7 | targetPort: 8888 8 | to: 9 | kind: Service 10 | name: standard 11 | weight: 100 12 | wildcardPolicy: None 13 | -------------------------------------------------------------------------------- /charts/coco-supported/sandbox/templates/kata-config.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.sandbox.deploy }} 2 | apiVersion: kataconfiguration.openshift.io/v1 3 | kind: KataConfig 4 | metadata: 5 | annotations: 6 | argocd.argoproj.io/sync-wave: "100" 7 | name: default-kata-config 8 | spec: 9 | enablePeerPods: true 10 | {{ end }} -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/insecure-policy-route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: route.openshift.io/v1 2 | kind: Route 3 | metadata: 4 | name: insecure-policy 5 | spec: 6 | port: 7 | targetPort: 8888 8 | to: 9 | kind: Service 10 | name: standard 11 | weight: 100 12 | wildcardPolicy: None 13 | -------------------------------------------------------------------------------- /charts/coco-supported/kbs-access/templates/secure-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: secure 5 | 
spec: 6 | ports: 7 | - name: 5000-tcp 8 | port: 5000 9 | protocol: TCP 10 | targetPort: 5000 11 | selector: 12 | app: secure 13 | sessionAffinity: None 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/secure-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: secure 5 | spec: 6 | ports: 7 | - name: 8888-tcp 8 | port: 8888 9 | protocol: TCP 10 | targetPort: 8888 11 | selector: 12 | app: secure 13 | sessionAffinity: None 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/standard-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: standard 5 | spec: 6 | ports: 7 | - name: 8888-tcp 8 | port: 8888 9 | protocol: TCP 10 | targetPort: 8888 11 | selector: 12 | app: standard 13 | sessionAffinity: None 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /.ansible-lint: -------------------------------------------------------------------------------- 1 | # Vim filetype=yaml 2 | --- 3 | offline: false 4 | 5 | exclude_paths: 6 | - .cache/ 7 | - .github/ 8 | - charts/ 9 | - common/ 10 | - tests/ 11 | - requirements.yml 12 | - values-* 13 | 14 | # warn_list: 15 | # - yaml 16 | # - schema 17 | # - experimental 18 | # - risky-file-permissions 19 | # - var-spacing 20 | -------------------------------------------------------------------------------- /.github/linters/.gitleaks.toml: -------------------------------------------------------------------------------- 1 | [whitelist] 2 | # As of v4, gitleaks only matches against filename, not path in the 3 | # files directive. Leaving content for backwards compatibility. 
4 | files = [ 5 | "ansible/plugins/modules/*.py", 6 | "ansible/tests/unit/test_*.py", 7 | "ansible/tests/unit/*.yaml", 8 | "ansible/tests/unit/v2/*.yaml", 9 | ] 10 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "devDependencies": { 3 | "@semantic-release/changelog": "^6.0.3", 4 | "@semantic-release/commit-analyzer": "^13.0.1", 5 | "@semantic-release/git": "^10.0.1", 6 | "@semantic-release/github": "^11.0.1", 7 | "@semantic-release/release-notes-generator": "^14.0.3", 8 | "semantic-release": "^24.2.3" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/insecure-policy-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: insecure-policy 5 | spec: 6 | ports: 7 | - name: 8888-tcp 8 | port: 8888 9 | protocol: TCP 10 | targetPort: 8888 11 | selector: 12 | app: insecure-policy 13 | sessionAffinity: None 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.swp 3 | *.swo 4 | values-secret* 5 | .*.expected.yaml 6 | pattern-vault.init 7 | vault.init 8 | super-linter.log 9 | common/pattern-vault.init 10 | secrets.md 11 | authfile 12 | kubeconfig 13 | azure 14 | ocp-install* 15 | install-config.yaml 16 | azure-env.sh 17 | .openshift* 18 | .DS_Store 19 | openshift-install 20 | node_modules 21 | .envrc 22 | .ansible/ -------------------------------------------------------------------------------- /common/scripts/determine-pattern-name.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | PATTERN_DIR="$1" 4 | 5 | if [ -z "$PATTERN_DIR" ]; 
then 6 | PATTERN_DIR="." 7 | fi 8 | 9 | PATNAME=$(yq '.global.pattern' "$PATTERN_DIR/values-global.yaml" 2>/dev/null) 10 | 11 | if [ -z "$PATNAME" ] || [ "$PATNAME" == "null" ]; then 12 | PATNAME="$(basename "$PWD")" 13 | fi 14 | 15 | echo "$PATNAME" 16 | -------------------------------------------------------------------------------- /common/scripts/determine-secretstore-backend.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | PATTERN_DIR="$1" 4 | 5 | if [ -z "$PATTERN_DIR" ]; then 6 | PATTERN_DIR="." 7 | fi 8 | 9 | BACKEND=$(yq '.global.secretStore.backend' "$PATTERN_DIR/values-global.yaml" 2>/dev/null) 10 | 11 | if [ -z "$BACKEND" -o "$BACKEND" == "null" ]; then 12 | BACKEND="vault" 13 | fi 14 | 15 | echo "$BACKEND" 16 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/kbs-route.yaml: -------------------------------------------------------------------------------- 1 | # Single cluster deploy don't use the route yet. 2 | --- 3 | apiVersion: route.openshift.io/v1 4 | kind: Route 5 | metadata: 6 | name: kbs 7 | namespace: {{ .Values.kbs.deployNS }} 8 | spec: 9 | port: 10 | targetPort: 8080 11 | to: 12 | kind: Service 13 | name: kbs-service 14 | weight: 100 15 | tls: 16 | termination: passthrough 17 | -------------------------------------------------------------------------------- /common/scripts/determine-main-clustergroup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | PATTERN_DIR="$1" 4 | 5 | if [ -z "$PATTERN_DIR" ]; then 6 | PATTERN_DIR="." 
7 | fi 8 | 9 | CGNAME=$(yq '.main.clusterGroupName' "$PATTERN_DIR/values-global.yaml") 10 | 11 | if [ -z "$CGNAME" ] || [ "$CGNAME" == "null" ]; then 12 | echo "Error - cannot detrmine clusterGroupName" 13 | exit 1 14 | fi 15 | 16 | echo "$CGNAME" 17 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | localhost_warning=False 3 | retry_files_enabled=False 4 | library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules 5 | roles_path=~/.ansible/roles:./ansible/roles:./common/ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles 6 | filter_plugins=~/.ansible/plugins/filter:./ansible/plugins/filter:./common/ansible/plugins/filter:/usr/share/ansible/plugins/filter 7 | -------------------------------------------------------------------------------- /.github/workflows/ansible-lint.yml: -------------------------------------------------------------------------------- 1 | name: Ansible Lint # feel free to pick your own name 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | build: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | # Important: This sets up your GITHUB_WORKSPACE environment variable 11 | - uses: actions/checkout@v4 12 | 13 | - name: Lint Ansible Playbook 14 | uses: ansible/ansible-lint@06f616d6e86e9ce4c74393318d1cbb2d016af413 15 | # Let's point it to the path -------------------------------------------------------------------------------- /charts/coco-supported/sandbox/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | cocoUpstream: true 3 | cocoConverged: true 4 | 5 | secretStore: 6 | name: vault-backend 7 | kind: ClusterSecretStore 8 | 9 | 10 | 11 | sandbox: 12 | deploy: true 13 | sshKey: secret/data/global/sshKey 14 | azure: true 15 | peerpodsCreds: secret/data/global/azure 16 | # These 
variables today limit to one cluster 17 | # revise using imperative framework to infer from cluster vars 18 | # Strongly advised to override in values-global.yaml or values-{cluster-group}.yaml 19 | -------------------------------------------------------------------------------- /overrides/values-IBMCloud.yaml: -------------------------------------------------------------------------------- 1 | # When using IBM ROKS the route certificates are signed by letsencrypt 2 | # By default the ESO configuration uses the kube-root-ca.crt configmap 3 | # to validate the connection to vault. Since this configmap will not contain 4 | # the letsencrypt CA, ESO will be unable to connect to the vault and return an 5 | # x509 CA unknown error. 6 | # Uncomment the following if you are using IBM ROKS (IPI installs on IBM Cloud are unaffected) 7 | 8 | # golangExternalSecrets: 9 | # caProvider: 10 | # enabled: false 11 | -------------------------------------------------------------------------------- /charts/hub/trustee/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | cocoUpstream: true 3 | 4 | secretStore: 5 | name: vault-backend 6 | kind: ClusterSecretStore 7 | 8 | kbs: 9 | # Do you do internal HTTPS for the KBS 10 | deployNS: trustee-operator-system 11 | https: 12 | enabled: false 13 | certAuth: 14 | enabled: false 15 | securityPolicy: secret/data/hub/securityPolicyConfig 16 | publicKey: secret/data/hub/kbsPublicKey 17 | privateKey: secret/data/global/kbsPrivateKey 18 | kbsres1: secret/data/hub/kbsres1 19 | passphrase: secret/data/hub/passphrase 20 | -------------------------------------------------------------------------------- /ansible/site.yaml: -------------------------------------------------------------------------------- 1 | # This is only needed for RHPDS 2 | - name: MultiCloud-GitOps RHPDS bootstrap 3 | hosts: localhost 4 | connection: local 5 | tasks: 6 | # We cannot use .package or .dnf modules because python3 
that is used comes 7 | # from a virtualenv 8 | - name: Launch the installation 9 | ansible.builtin.command: ./pattern.sh make install 10 | args: 11 | chdir: "{{ lookup('env', 'PWD') }}" 12 | register: output 13 | changed_when: false 14 | 15 | - name: Print output of installation 16 | ansible.builtin.debug: 17 | msg: "{{ output }}" 18 | -------------------------------------------------------------------------------- /charts/coco-supported/kbs-access/README.md: -------------------------------------------------------------------------------- 1 | # Notes use of external image 2 | 3 | This chart currently uses an [image hosted on ghcr.io](ghcr.io/butler54/kbs-access-app:latest) built from the [following repository](https://github.com/butler54/coco-kbs-access). 4 | 5 | Using separate repository for build rather than integrated content is discouraged by validated patterns. 6 | 7 | The separate repository is because Coco (via the Kata guest components) must be served by an image registry using a TLS connection with a well known CA (as of today). 8 | 9 | This chart will be updated as that position changes. 
10 | -------------------------------------------------------------------------------- /common/scripts/load-k8s-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu 3 | 4 | get_abs_filename() { 5 | # $1 : relative filename 6 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" 7 | } 8 | 9 | SCRIPT=$(get_abs_filename "$0") 10 | SCRIPTPATH=$(dirname "${SCRIPT}") 11 | COMMONPATH=$(dirname "${SCRIPTPATH}") 12 | PATTERNPATH=$(dirname "${COMMONPATH}") 13 | 14 | PATTERN_NAME=${1:-$(basename "`pwd`")} 15 | 16 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}" 17 | 18 | ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.k8s_secrets" 19 | -------------------------------------------------------------------------------- /common/scripts/write-token-kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu 3 | 4 | OUTPUTFILE=${1:-"~/.kube/config"} 5 | 6 | get_abs_filename() { 7 | # $1 : relative filename 8 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" 9 | } 10 | 11 | SCRIPT=$(get_abs_filename "$0") 12 | SCRIPTPATH=$(dirname "${SCRIPT}") 13 | COMMONPATH=$(dirname "${SCRIPTPATH}") 14 | PATTERNPATH=$(dirname "${COMMONPATH}") 15 | 16 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}" 17 | 18 | ansible-playbook -e pattern_dir="${PATTERNPATH}" -e kubeconfig_file="${OUTPUTFILE}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.write-token-kubeconfig" 19 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/reference-values.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | annotations: 5 | argocd.argoproj.io/sync-wave: "1" 6 | name: rvps-reference-values 7 | namespace: {{ .Values.kbs.deployNS }} 8 | data: 9 | 
reference-values.json: | 10 | [ 11 | ] 12 | 13 | # No reference values yet 14 | # [ 15 | # { 16 | # "name": "sample.svn", 17 | # "expired": "2025-01-01T00:00:00Z", 18 | # "hash-value": [ 19 | # { 20 | # "alg": "sha256", 21 | # "value": "1" 22 | # } 23 | # ] 24 | # } 25 | # ] 26 | -------------------------------------------------------------------------------- /ansible/initdata-default.toml.tpl: -------------------------------------------------------------------------------- 1 | algorithm = "sha384" 2 | version = "0.1.0" 3 | 4 | [data] 5 | "aa.toml" = ''' 6 | [token_configs] 7 | [token_configs.coco_as] 8 | url = "https://kbs-trustee-operator-system.{{ hub_domain }}" 9 | 10 | [token_configs.kbs] 11 | url = "https://kbs-trustee-operator-system.{{ hub_domain }}" 12 | cert = """ 13 | {{ trustee_cert }} 14 | """ 15 | ''' 16 | 17 | "cdh.toml" = ''' 18 | socket = 'unix:///run/confidential-containers/cdh.sock' 19 | credentials = [] 20 | 21 | [kbc] 22 | name = "cc_kbc" 23 | url = "https://kbs-trustee-operator-system.{{ hub_domain }}" 24 | kbs_cert = """ 25 | {{ trustee_cert }} 26 | """ 27 | ''' 28 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/kbsres1-eso.yaml: -------------------------------------------------------------------------------- 1 | {{- if ne .Values.global.secretStore.backend "none" }} 2 | --- 3 | apiVersion: "external-secrets.io/v1beta1" 4 | kind: ExternalSecret 5 | metadata: 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "1" 8 | name: kbsres1-eso 9 | namespace: {{ .Values.kbs.deployNS }} 10 | spec: 11 | refreshInterval: 15s 12 | secretStoreRef: 13 | name: {{ .Values.secretStore.name }} 14 | kind: {{ .Values.secretStore.kind }} 15 | data: 16 | target: 17 | name: kbsres1 18 | template: 19 | type: Opaque 20 | dataFrom: 21 | - extract: 22 | key: {{ .Values.kbs.kbsres1 }} 23 | {{- end }} -------------------------------------------------------------------------------- /rhdp/README.md: 
-------------------------------------------------------------------------------- 1 | # RHDP support 2 | 3 | Red Hat demo platform is a system for employees and red hat partners to generate test infrastructure. 4 | The scripts in this directory help users of that platform automate deployments. 5 | 6 | ## To deploy 7 | 8 | 1. Stand up the 'Azure Subscription Based Blank Open Environment' 9 | 2. Download the credentials 10 | 3. Load the credentials into your environment (e.g. using `direnv`) 11 | 4. Launch the wrapper script from the repository root directory: 12 | 1. `bash ./rhdp/wrapper.sh eastasia` 13 | 2. The wrapper script **requires** an azure region code this code SHOULD be the same as what was selected in RHDP. 14 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/tls-key-eso.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "external-secrets.io/v1beta1" 3 | kind: ExternalSecret 4 | metadata: 5 | annotations: 6 | argocd.argoproj.io/sync-wave: "1" 7 | name: tls-key-eso 8 | namespace: trustee-operator-system 9 | spec: 10 | refreshInterval: 15s 11 | secretStoreRef: 12 | name: {{ .Values.secretStore.name }} 13 | kind: {{ .Values.secretStore.kind }} 14 | target: 15 | name: kbs-https-key 16 | template: 17 | type: Opaque 18 | data: 19 | - secretKey: tls.key 20 | remoteRef: 21 | key: 'secret/data/pushsecrets/kbs-tls-self-signed' 22 | property: key 23 | 24 | 25 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: default 2 | default: help 3 | 4 | .PHONY: help 5 | ##@ Pattern tasks 6 | 7 | # No need to add a comment here as help is described in common/ 8 | help: 9 | @make -f common/Makefile MAKEFILE_LIST="Makefile common/Makefile" help 10 | 11 | %: 12 | make -f common/Makefile $* 13 | 14 | .PHONY: install 15 | 
install: operator-deploy post-install ## installs the pattern and loads the secrets 16 | @echo "Installed" 17 | 18 | .PHONY: post-install 19 | post-install: ## Post-install tasks 20 | make load-secrets 21 | @echo "Done" 22 | 23 | .PHONY: test 24 | test: 25 | @make -f common/Makefile PATTERN_OPTS="-f values-global.yaml -f values-hub.yaml" test 26 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/kbs-passphrase-eso.yaml: -------------------------------------------------------------------------------- 1 | {{- if ne .Values.global.secretStore.backend "none" }} 2 | --- 3 | apiVersion: "external-secrets.io/v1beta1" 4 | kind: ExternalSecret 5 | metadata: 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "1" 8 | name: kbs-passphrase-eso 9 | namespace: {{ .Values.kbs.deployNS }} 10 | spec: 11 | refreshInterval: 15s 12 | secretStoreRef: 13 | name: {{ .Values.secretStore.name }} 14 | kind: {{ .Values.secretStore.kind }} 15 | data: 16 | target: 17 | name: passphrase 18 | template: 19 | type: Opaque 20 | dataFrom: 21 | - extract: 22 | key: {{ .Values.kbs.passphrase }} 23 | {{- end }} -------------------------------------------------------------------------------- /charts/hub/trustee/templates/kbs-operator-keys.yaml: -------------------------------------------------------------------------------- 1 | {{- if ne .Values.global.secretStore.backend "none" }} 2 | --- 3 | apiVersion: "external-secrets.io/v1beta1" 4 | kind: ExternalSecret 5 | metadata: 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "1" 8 | name: kbs-auth-public-key-eso 9 | namespace: {{ .Values.kbs.deployNS }} 10 | spec: 11 | refreshInterval: 15s 12 | secretStoreRef: 13 | name: {{ .Values.secretStore.name }} 14 | kind: {{ .Values.secretStore.kind }} 15 | data: 16 | target: 17 | name: kbs-auth-public-key 18 | template: 19 | type: Opaque 20 | dataFrom: 21 | - extract: 22 | key: {{ .Values.kbs.publicKey }} 23 | {{- end }} 
-------------------------------------------------------------------------------- /charts/hub/trustee/templates/securityPolicy-eso.yaml: -------------------------------------------------------------------------------- 1 | {{- if ne .Values.global.secretStore.backend "none" }} 2 | --- 3 | apiVersion: "external-secrets.io/v1beta1" 4 | kind: ExternalSecret 5 | metadata: 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "1" 8 | name: securitypolicy-eso 9 | namespace: {{ .Values.kbs.deployNS }} 10 | spec: 11 | refreshInterval: 15s 12 | secretStoreRef: 13 | name: {{ .Values.secretStore.name }} 14 | kind: {{ .Values.secretStore.kind }} 15 | data: 16 | target: 17 | name: security-policy 18 | template: 19 | type: generic 20 | dataFrom: 21 | - extract: 22 | key: {{ .Values.kbs.securityPolicy }} 23 | {{- end }} -------------------------------------------------------------------------------- /charts/hub/trustee/templates/tls-cert-eso.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "external-secrets.io/v1beta1" 3 | kind: ExternalSecret 4 | metadata: 5 | annotations: 6 | argocd.argoproj.io/sync-wave: "1" 7 | name: tls-cert-eso 8 | namespace: trustee-operator-system 9 | spec: 10 | refreshInterval: 15s 11 | secretStoreRef: 12 | name: {{ .Values.secretStore.name }} 13 | kind: {{ .Values.secretStore.kind }} 14 | target: 15 | name: kbs-https-certificate 16 | template: 17 | type: Opaque 18 | data: 19 | - secretKey: tls.crt 20 | remoteRef: 21 | key: 'secret/data/pushsecrets/kbs-tls-self-signed' 22 | property: certificate 23 | 24 | 25 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/standard-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: standard 5 | labels: 6 | app: standard 7 | spec: 8 | runtimeClassName: {{ .Values.global.runtimeClass }} 9 | 
containers: 10 | - name: hello-openshift 11 | image: quay.io/openshift/origin-hello-openshift 12 | ports: 13 | - containerPort: 8888 14 | securityContext: 15 | privileged: false 16 | allowPrivilegeEscalation: false 17 | runAsNonRoot: true 18 | runAsUser: 1001 19 | capabilities: 20 | drop: 21 | - ALL 22 | seccompProfile: 23 | type: RuntimeDefault -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/secure-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secure 5 | labels: 6 | app: secure 7 | annotations: 8 | peerpods: "true" 9 | spec: 10 | runtimeClassName: kata-remote 11 | containers: 12 | - name: hello-openshift 13 | image: quay.io/openshift/origin-hello-openshift 14 | ports: 15 | - containerPort: 8888 16 | securityContext: 17 | privileged: false 18 | allowPrivilegeEscalation: false 19 | runAsNonRoot: true 20 | runAsUser: 1001 21 | capabilities: 22 | drop: 23 | - ALL 24 | seccompProfile: 25 | type: RuntimeDefault 26 | 27 | -------------------------------------------------------------------------------- /charts/coco-supported/sandbox/templates/ssh-key-eso.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (ne .Values.global.secretStore.backend "none") (.Values.sandbox.azure) }} 2 | --- 3 | apiVersion: "external-secrets.io/v1beta1" 4 | kind: ExternalSecret 5 | metadata: 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "1" 8 | name: ssh-key-secret-eso 9 | namespace: openshift-sandboxed-containers-operator 10 | spec: 11 | refreshInterval: 15s 12 | secretStoreRef: 13 | name: {{ .Values.secretStore.name }} 14 | kind: {{ .Values.secretStore.kind }} 15 | target: 16 | name: ssh-key-secret 17 | template: 18 | type: Opaque 19 | dataFrom: 20 | - extract: 21 | key: {{ .Values.sandbox.sshKey }} 22 | 23 | {{ end }} 
-------------------------------------------------------------------------------- /ansible/install-deps.yaml: -------------------------------------------------------------------------------- 1 | - name: Retrieve Credentials for AAP on OpenShift 2 | become: false 3 | connection: local 4 | hosts: localhost 5 | gather_facts: false 6 | tasks: 7 | - name: Ensure collection is installed 8 | community.general.ansible_galaxy_install: 9 | type: collection 10 | name: azure.azcollection 11 | - name: Ensure community.crypto collection is installed 12 | community.general.ansible_galaxy_install: 13 | type: collection 14 | name: community.crypto 15 | - name: Install a Python package 16 | ansible.builtin.pip: 17 | requirements: "~/.ansible/collections/ansible_collections/azure/azcollection/requirements.txt" 18 | extra_args: --user 19 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/cert-manager-installation.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.letsencrypt.enabled }} 2 | apiVersion: operator.openshift.io/v1alpha1 3 | kind: CertManager 4 | metadata: 5 | name: cluster 6 | spec: 7 | managementState: "Managed" 8 | unsupportedConfigOverrides: 9 | # Here's an example to supply custom DNS settings. 10 | # Use this if you are getting propagation errors or are unable to determine the correct NS in certmanager logs 11 | controller: 12 | args: 13 | - "--dns01-recursive-nameservers={{ with index .Values.letsencrypt.nameservers 0 }}{{ . }}{{- end }},{{ with index .Values.letsencrypt.nameservers 1 }}{{ . 
}}{{- end }}" 14 | - "--dns01-recursive-nameservers-only" 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /common/scripts/deploy-pattern.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o pipefail 3 | 4 | RUNS=10 5 | WAIT=15 6 | # Retry five times because the CRD might not be fully installed yet 7 | echo -n "Installing pattern: " 8 | for i in $(seq 1 ${RUNS}); do \ 9 | exec 3>&1 4>&2 10 | OUT=$( { helm template --include-crds --name-template $* 2>&4 | oc apply -f- 2>&4 1>&3; } 4>&1 3>&1) 11 | ret=$? 12 | exec 3>&- 4>&- 13 | if [ ${ret} -eq 0 ]; then 14 | break; 15 | else 16 | echo -n "." 17 | sleep "${WAIT}" 18 | fi 19 | done 20 | 21 | # All the runs failed 22 | if [ ${i} -eq ${RUNS} ]; then 23 | echo "Installation failed [${i}/${RUNS}]. Error:" 24 | echo "${OUT}" 25 | exit 1 26 | fi 27 | echo "Done" 28 | -------------------------------------------------------------------------------- /values-global.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | pattern: coco-pattern 3 | options: 4 | useCSV: false 5 | syncPolicy: Automatic 6 | installPlanApproval: Automatic 7 | autoApproveManualInstallPlans: true 8 | # This defines whether or not to use upstream resources for CoCo. 9 | # Defines whether or not the hub cluster can be used for confidential containers 10 | coco: 11 | azure: 12 | enabled: true 13 | defaultVMFlavour: "Standard_DC2as_v5" 14 | main: 15 | # WARNING 16 | # This default configuration uses a single cluster on azure. 17 | # It fundamentally violates the separation of duties. 
18 | clusterGroupName: simple 19 | multiSourceConfig: 20 | enabled: true 21 | clusterGroupChartVersion: 0.9.* 22 | -------------------------------------------------------------------------------- /common/scripts/process-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu 3 | 4 | get_abs_filename() { 5 | # $1 : relative filename 6 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" 7 | } 8 | 9 | SCRIPT=$(get_abs_filename "$0") 10 | SCRIPTPATH=$(dirname "${SCRIPT}") 11 | COMMONPATH=$(dirname "${SCRIPTPATH}") 12 | PATTERNPATH=$(dirname "${COMMONPATH}") 13 | 14 | PATTERN_NAME=${1:-$(basename "`pwd`")} 15 | SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)" 16 | 17 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}" 18 | 19 | ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.process_secrets" 20 | -------------------------------------------------------------------------------- /.github/workflows/conventional-pr.yml: -------------------------------------------------------------------------------- 1 | name: "Lint PR title" 2 | 3 | on: 4 | pull_request_target: 5 | types: 6 | - opened 7 | - edited 8 | - synchronize 9 | branches: 10 | - 'main' 11 | - 'develop' 12 | jobs: 13 | lint: 14 | if: ${{ github.head_ref != 'develop' }} 15 | runs-on: ubuntu-latest 16 | steps: 17 | - name: Checkout code 18 | uses: actions/checkout@v4 19 | 20 | - name: Install dependencies 21 | run: npm install @commitlint/cli @commitlint/config-conventional 22 | 23 | - name: Validate PR title 24 | run: | 25 | PR_TITLE=$(jq -r '.pull_request.title' "$GITHUB_EVENT_PATH") 26 | echo "$PR_TITLE" | npx commitlint --config commitlint.config.js 27 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: 
-------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | release: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | contents: write # Required for creating releases/tags 13 | steps: 14 | - name: Checkout repository 15 | uses: actions/checkout@v4 16 | with: 17 | fetch-depth: 0 # Required for semantic-release to access all commits 18 | 19 | - name: Setup Node.js 20 | uses: actions/setup-node@v4 21 | with: 22 | node-version: 'lts/*' 23 | 24 | - name: Install dependencies 25 | run: npm ci 26 | 27 | - name: Run semantic-release 28 | env: 29 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 30 | run: npx semantic-release -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/templates/insecure-policy-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: insecure-policy 5 | labels: 6 | app: insecure-policy 7 | annotations: 8 | io.katacontainers.config.agent.policy: '{{ tpl ( .Files.Get "insecure-policy.rego") . 
| b64enc }}' 9 | spec: 10 | runtimeClassName: kata-remote 11 | containers: 12 | - name: hello-openshift 13 | image: quay.io/openshift/origin-hello-openshift 14 | ports: 15 | - containerPort: 8888 16 | securityContext: 17 | privileged: false 18 | allowPrivilegeEscalation: false 19 | runAsNonRoot: true 20 | runAsUser: 1001 21 | capabilities: 22 | drop: 23 | - ALL 24 | seccompProfile: 25 | type: RuntimeDefault 26 | 27 | --- 28 | -------------------------------------------------------------------------------- /common/scripts/vault-utils.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu 3 | 4 | get_abs_filename() { 5 | # $1 : relative filename 6 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" 7 | } 8 | 9 | SCRIPT=$(get_abs_filename "$0") 10 | SCRIPTPATH=$(dirname "${SCRIPT}") 11 | COMMONPATH=$(dirname "${SCRIPTPATH}") 12 | PATTERNPATH=$(dirname "${COMMONPATH}") 13 | 14 | # Parse arguments 15 | if [ $# -lt 1 ]; then 16 | echo "Specify at least the command ($#): $*" 17 | exit 1 18 | fi 19 | 20 | TASK="${1}" 21 | PATTERN_NAME=${2:-$(basename "`pwd`")} 22 | 23 | if [ -z ${TASK} ]; then 24 | echo "Task is unset" 25 | exit 1 26 | fi 27 | 28 | EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}" 29 | 30 | ansible-playbook -t "${TASK}" -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.vault" 31 | -------------------------------------------------------------------------------- /common/scripts/manage-secret-namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | NAMESPACE=$1 4 | STATE=$2 5 | 6 | MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml" 7 | MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)" 8 | 9 | case "$STATE" in 10 | "present") 11 | 12 | RES=$(yq ".clusterGroup.namespaces[] | select(. 
== \"$NAMESPACE\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null) 13 | if [ -z "$RES" ]; then 14 | echo "Namespace $NAMESPACE not found, adding" 15 | yq -i ".clusterGroup.namespaces += [ \"$NAMESPACE\" ]" "$MAIN_CLUSTERGROUP_FILE" 16 | fi 17 | ;; 18 | "absent") 19 | echo "Removing namespace $NAMESPACE" 20 | yq -i "del(.clusterGroup.namespaces[] | select(. == \"$NAMESPACE\"))" "$MAIN_CLUSTERGROUP_FILE" 21 | ;; 22 | *) 23 | echo "$STATE not supported" 24 | exit 1 25 | ;; 26 | esac 27 | 28 | exit 0 29 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/wildcard-cert.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.letsencrypt.enabled }} 2 | apiVersion: cert-manager.io/v1 3 | kind: Certificate 4 | metadata: 5 | name: lets-encrypt-certs 6 | namespace: openshift-ingress 7 | annotations: 8 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 9 | spec: 10 | secretName: lets-encrypt-wildcart-cert-tls 11 | duration: {{ .Values.letsencrypt.duration }} 12 | renewBefore: {{ .Values.letsencrypt.renewBefore }} 13 | commonName: '*.{{ $.Values.global.localClusterDomain }}' 14 | usages: 15 | {{- range .Values.letsencrypt.usages }} 16 | - {{ . }} 17 | {{- end }} 18 | dnsNames: 19 | - '*.{{ $.Values.global.localClusterDomain }}' 20 | issuerRef: 21 | name: validated-patterns-issuer 22 | kind: ClusterIssuer 23 | subject: 24 | organizations: 25 | {{- range .Values.letsencrypt.organizations }} 26 | - {{ . 
}} 27 | {{- end }} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /common/scripts/preview-all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | REPO=$1; shift; 4 | TARGET_BRANCH=$1; shift 5 | 6 | HUB=$( yq ".main.clusterGroupName" values-global.yaml ) 7 | MANAGED_CLUSTERS=$( yq ".clusterGroup.managedClusterGroups.[].name" values-$HUB.yaml ) 8 | ALL_CLUSTERS=( $HUB $MANAGED_CLUSTERS ) 9 | 10 | CLUSTER_INFO_OUT=$(oc cluster-info 2>&1) 11 | CLUSTER_INFO_RET=$? 12 | if [ $CLUSTER_INFO_RET -ne 0 ]; then 13 | echo "Could not access the cluster:" 14 | echo "${CLUSTER_INFO_OUT}" 15 | exit 1 16 | fi 17 | 18 | for cluster in ${ALL_CLUSTERS[@]}; do 19 | # We always add clustergroup as it is the entry point and it gets special cased in preview.sh. 20 | APPS="clustergroup $( yq ".clusterGroup.applications.[].name" values-$cluster.yaml )" 21 | for app in $APPS; do 22 | printf "# Parsing application $app from cluster $cluster\n" 23 | common/scripts/preview.sh $cluster $app $REPO $TARGET_BRANCH 24 | done 25 | done 26 | -------------------------------------------------------------------------------- /common/scripts/display-secrets-info.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eu 3 | 4 | get_abs_filename() { 5 | # $1 : relative filename 6 | echo "$(cd "$(dirname "$1")" && pwd)/$(basename "$1")" 7 | } 8 | 9 | SCRIPT=$(get_abs_filename "$0") 10 | SCRIPTPATH=$(dirname "${SCRIPT}") 11 | COMMONPATH=$(dirname "${SCRIPTPATH}") 12 | PATTERNPATH=$(dirname "${COMMONPATH}") 13 | 14 | if [ "$#" -ge 1 ]; then 15 | export VALUES_SECRET=$(get_abs_filename "${1}") 16 | fi 17 | 18 | if [[ "$#" == 2 ]]; then 19 | SECRETS_BACKING_STORE="$2" 20 | else 21 | SECRETS_BACKING_STORE="$($SCRIPTPATH/determine-secretstore-backend.sh)" 22 | fi 23 | 24 | PATTERN_NAME=$(basename "`pwd`") 25 | 26 | 
EXTRA_PLAYBOOK_OPTS="${EXTRA_PLAYBOOK_OPTS:-}" 27 | 28 | ansible-playbook -e pattern_name="${PATTERN_NAME}" -e pattern_dir="${PATTERNPATH}" -e secrets_backing_store="${SECRETS_BACKING_STORE}" -e hide_sensitive_output=false ${EXTRA_PLAYBOOK_OPTS} "rhvp.cluster_utils.display_secrets_info" 29 | -------------------------------------------------------------------------------- /charts/coco-supported/kbs-access/templates/secure-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secure 5 | labels: 6 | app: secure 7 | annotations: 8 | peerpods: "true" 9 | spec: 10 | runtimeClassName: kata-remote 11 | containers: 12 | - name: python-access 13 | image: ghcr.io/butler54/kbs-access-app:latest 14 | ports: 15 | - containerPort: 5000 16 | volumeMounts: 17 | - name: output-volume 18 | mountPath: /output 19 | envFrom: 20 | - configMapRef: 21 | name: kbsref 22 | initContainers: 23 | - name: curl 24 | image: registry.access.redhat.com/ubi9/ubi:latest # Lightweight image with curl installed 25 | command: ['sh', '-c', 'curl -s http://127.0.0.1:8006/cdh/resource/default/kbsres1/key3 > /output/kbsres1.txt'] 26 | volumeMounts: 27 | - name: output-volume 28 | mountPath: /output 29 | volumes: 30 | - name: output-volume 31 | emptyDir: {} 32 | 33 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/credentials-request.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.letsencrypt.enabled }} 2 | {{ if and (eq .Values.global.clusterPlatform "AWS") .Values.letsencrypt.cloudProviderDNS }} 3 | apiVersion: cloudcredential.openshift.io/v1 4 | kind: CredentialsRequest 5 | metadata: 6 | name: letsencrypt-cert-manager-dns 7 | namespace: openshift-cloud-credential-operator 8 | annotations: 9 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 10 | spec: 11 | providerSpec: 12 | 
apiVersion: cloudcredential.openshift.io/v1 13 | kind: AWSProviderSpec 14 | statementEntries: 15 | - action: 16 | - 'route53:ChangeResourceRecordSets' 17 | - 'route53:GetChange' 18 | - 'route53:ListHostedZonesByName' 19 | - 'route53:ListHostedZones' 20 | effect: Allow 21 | resource: '*' 22 | secretRef: 23 | name: cert-manager-dns-credentials 24 | namespace: cert-manager 25 | {{- end }} 26 | # Azure is done out of band via creds today as OCP doesn't have workload identity 27 | {{ end }} -------------------------------------------------------------------------------- /rhdp/install-config.yaml.j2: -------------------------------------------------------------------------------- 1 | additionalTrustBundlePolicy: Proxyonly 2 | apiVersion: v1 3 | baseDomain: {{ GUID }}.azure.redhatworkshops.io 4 | compute: 5 | - architecture: amd64 6 | hyperthreading: Enabled 7 | name: worker 8 | platform: 9 | azure: 10 | type: Standard_D8s_v5 11 | replicas: 3 12 | controlPlane: 13 | architecture: amd64 14 | hyperthreading: Enabled 15 | name: master 16 | platform: 17 | azure: 18 | type: Standard_D8s_v5 19 | replicas: 3 20 | metadata: 21 | creationTimestamp: null 22 | name: coco 23 | networking: 24 | clusterNetwork: 25 | - cidr: 10.128.0.0/14 26 | hostPrefix: 23 27 | machineNetwork: 28 | - cidr: 10.0.0.0/16 29 | networkType: OVNKubernetes 30 | serviceNetwork: 31 | - 172.30.0.0/16 32 | platform: 33 | azure: 34 | baseDomainResourceGroupName: {{ RESOURCEGROUP }} 35 | cloudName: AzurePublicCloud 36 | outboundType: Loadbalancer 37 | region: {{ region }} 38 | publish: External 39 | pullSecret: '{{ pull_secret }}' 40 | sshKey: '{{ ssh_key }}' 41 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: letsencrypt 3 | description: A Helm chart to add letsencrypt support to Validated Patterns. 
4 | 5 | type: application 6 | 7 | # This is the chart version. This version number should be incremented each time you make changes 8 | # to the chart and its templates, including the app version. 9 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 10 | version: 0.1.1 11 | 12 | # This is the version number of the application being deployed. This version number should be 13 | # incremented each time you make changes to the application. Versions are not expected to 14 | # follow Semantic Versioning. They should reflect the version the application is using. 15 | # It is recommended to use it with quotes. 16 | appVersion: "1.16.0" 17 | home: https://github.com/validatedpatterns/letsencrypt-chart 18 | maintainers: 19 | - name: Validated Patterns Team 20 | email: validatedpatterns@googlegroups.com 21 | icon: https://validatedpatterns.io/images/validated-patterns.png 22 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/api-cert.yaml: -------------------------------------------------------------------------------- 1 | {{ if and (.Values.letsencrypt.enabled) (.Values.letsencrypt.api_endpoint) }} 2 | apiVersion: cert-manager.io/v1 3 | kind: Certificate 4 | metadata: 5 | name: api-validated-patterns-cert 6 | namespace: openshift-config 7 | annotations: 8 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 9 | spec: 10 | secretName: api-validated-patterns-letsencrypt-cert 11 | duration: {{ .Values.letsencrypt.duration }} 12 | renewBefore: {{ .Values.letsencrypt.renewBefore }} 13 | commonName: 'api.{{ $.Values.global.localClusterDomain | replace "apps." "" }}' 14 | usages: 15 | {{- range .Values.letsencrypt.usages }} 16 | - {{ . }} 17 | {{- end }} 18 | dnsNames: 19 | - api.{{ $.Values.global.localClusterDomain | replace "apps." 
"" }} 20 | issuerRef: 21 | name: validated-patterns-issuer 22 | kind: ClusterIssuer 23 | subject: 24 | organizations: 25 | {{- range .Values.letsencrypt.organizations }} 26 | - {{ . }} 27 | {{- end }} 28 | {{- end }} 29 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/issuer.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.letsencrypt.enabled }} 2 | {{ if and (eq .Values.global.clusterPlatform "AWS") .Values.letsencrypt.cloudProviderDNS }} 3 | apiVersion: cert-manager.io/v1 4 | kind: ClusterIssuer 5 | metadata: 6 | name: validated-patterns-issuer 7 | annotations: 8 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 9 | spec: 10 | acme: 11 | server: {{ .Values.letsencrypt.server }} 12 | email: {{ .Values.letsencrypt.email }} 13 | # Only use for persistent domains :) 14 | privateKeySecretRef: 15 | name: validated-patterns-issuer-account-key 16 | solvers: 17 | - selector: {} 18 | dns01: 19 | route53: 20 | region: {{ .Values.letsencrypt.region }} 21 | accessKeyIDSecretRef: 22 | name: cert-manager-dns-credentials 23 | key: aws_access_key_id 24 | secretAccessKeySecretRef: 25 | name: cert-manager-dns-credentials 26 | key: aws_secret_access_key 27 | 28 | {{- end }} 29 | {{- end }} 30 | -------------------------------------------------------------------------------- /overrides/values-AWS.yaml: -------------------------------------------------------------------------------- 1 | # The following snippet can be commented out in oroder 2 | # to enable letsencrypt certificates on API endpoint and default 3 | # ingress of the cluster 4 | # It is currently very experimental and unsupported. 
5 | # PLEASE read https://github.com/hybrid-cloud-patterns/common/tree/main/letsencrypt#readme 6 | # for all the limitations around it 7 | 8 | 9 | # letsencrypt: 10 | # enabled: true 11 | # api_endpoint: true 12 | # # FIXME: tweak this to match your region 13 | # region: eu-central-1 14 | # server: https://acme-v02.api.letsencrypt.org/directory 15 | # # server: https://acme-staging-v02.api.letsencrypt.org/directory 16 | # # FIXME: set this to your correct email 17 | # email: iwashere@iwashere.com 18 | # 19 | # clusterGroup: 20 | # applications: 21 | # letsencrypt: 22 | # name: letsencrypt 23 | # namespace: letsencrypt 24 | # # Using 'default' as that exists everywhere 25 | # project: default 26 | # path: common/letsencrypt 27 | 28 | global: 29 | objectStorage: 30 | backingStorageClass: "gp3-csi" 31 | -------------------------------------------------------------------------------- /overrides/values-Azure.yaml: -------------------------------------------------------------------------------- 1 | # The following snippet can be commented out in oroder 2 | # to enable letsencrypt certificates on API endpoint and default 3 | # ingress of the cluster 4 | # It is currently very experimental and unsupported. 
5 | # PLEASE read https://github.com/hybrid-cloud-patterns/common/tree/main/letsencrypt#readme 6 | # for all the limitations around it 7 | 8 | 9 | global: 10 | objectStorage: 11 | backingStorageClass: "managed-storage" 12 | 13 | 14 | # letsencrypt: 15 | # enabled: true 16 | # api_endpoint: true 17 | # # FIXME: tweak this to match your region 18 | # region: eu-central-1 19 | # server: https://acme-v02.api.letsencrypt.org/directory 20 | # # server: https://acme-staging-v02.api.letsencrypt.org/directory 21 | # # FIXME: set this to your correct email 22 | # email: iwashere@iwashere.com 23 | # 24 | # clusterGroup: 25 | # applications: 26 | # letsencrypt: 27 | # name: letsencrypt 28 | # namespace: letsencrypt 29 | # # Using 'default' as that exists everywhere 30 | # project: default 31 | # path: common/letsencrypt 32 | -------------------------------------------------------------------------------- /common/.ansible-lint: -------------------------------------------------------------------------------- 1 | # Vim filetype=yaml 2 | --- 3 | offline: false 4 | skip_list: 5 | - name[template] # Allow Jinja templating inside task and play names 6 | - template-instead-of-copy # Templated files should use template instead of copy 7 | - yaml[line-length] # too long lines 8 | - yaml[indentation] # Forcing lists to be always indented by 2 chars is silly IMO 9 | - var-naming[no-role-prefix] # This would be too much churn for very little gain 10 | - no-changed-when 11 | - var-naming[no-role-prefix] # There are too many changes now and it would be too risky 12 | 13 | # ansible-lint gh workflow cannot find ansible.cfg hence fails to import vault_utils role 14 | exclude_paths: 15 | - ./ansible/playbooks/vault/vault.yaml 16 | - ./ansible/playbooks/iib-ci/iib-ci.yaml 17 | - ./ansible/playbooks/k8s_secrets/k8s_secrets.yml 18 | - ./ansible/playbooks/process_secrets/process_secrets.yml 19 | - ./ansible/playbooks/write-token-kubeconfig/write-token-kubeconfig.yml 20 | - 
./ansible/playbooks/process_secrets/display_secrets_info.yml 21 | - ./ansible/roles/vault_utils/tests/test.yml 22 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/kbs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: confidentialcontainers.org/v1alpha1 2 | kind: KbsConfig 3 | metadata: 4 | name: kbsconfig 5 | namespace: {{ .Values.kbs.deployNS }} 6 | spec: 7 | kbsConfigMapName: kbs-config 8 | kbsAuthSecretName: kbs-auth-public-key 9 | kbsDeploymentType: AllInOneDeployment 10 | kbsRvpsRefValuesConfigMapName: rvps-reference-values 11 | kbsSecretResources: ["kbsres1", "passphrase", "security-policy"] 12 | kbsHttpsKeySecretName: kbs-https-key 13 | kbsHttpsCertSecretName: kbs-https-certificate 14 | kbsResourcePolicyConfigMapName: resource-policy 15 | 16 | # TDX specific configuration (optional) 17 | # tdxConfigSpec: 18 | # kbsTdxConfigMapName: tdx-config 19 | 20 | # IBM SE specific configuration (optional) 21 | # ibmSEConfigSpec: 22 | # certStorePvc: 23 | 24 | # Override attestation policy (optional) 25 | # kbsAttestationPolicyConfigMapName: attestation-policy 26 | 27 | # Inject environment variables (optional) 28 | # Enable DEBUG logging in trustee pods 29 | KbsEnvVars: 30 | RUST_LOG: debug 31 | 32 | # service type (optional, it defaults to ClusterIP) 33 | kbsServiceType: ClusterIP 34 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/push-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1alpha1 3 | kind: PushSecret 4 | metadata: 5 | name: push-certs 6 | namespace: imperative 7 | spec: 8 | updatePolicy: Replace # Policy to overwrite existing secrets in the provider on sync 9 | deletionPolicy: Delete # the provider' secret will be deleted if the PushSecret is deleted 10 | refreshInterval: 10s # Refresh 
interval for which push secret will reconcile 11 | secretStoreRefs: # A list of secret stores to push secrets to 12 | - name: {{ .Values.secretStore.name }} 13 | kind: {{ .Values.secretStore.kind }} 14 | selector: 15 | secret: 16 | name: kbs-tls-self-signed # Source Kubernetes secret to be pushed 17 | data: 18 | - match: 19 | secretKey: tls.key # Source Kubernetes secret key to be pushed 20 | remoteRef: 21 | remoteKey: "pushsecrets/kbs-tls-self-signed" # Remote reference (where the secret is going to be pushed) 22 | property: key 23 | - match: 24 | secretKey: tls.crt # Source Kubernetes secret key to be pushed 25 | remoteRef: 26 | remoteKey: "pushsecrets/kbs-tls-self-signed" 27 | property: certificate # Remote reference (where the secret is going to be pushed 28 | -------------------------------------------------------------------------------- /common/.github/workflows/pattern-sh-ci.yml: -------------------------------------------------------------------------------- 1 | name: Run Bash Script on Multiple Distributions 2 | 3 | on: 4 | push: 5 | paths: 6 | - "scripts/**" 7 | - "Makefile" 8 | branches: 9 | - main 10 | pull_request: 11 | paths: 12 | - "scripts/**" 13 | - "Makefile" 14 | 15 | jobs: 16 | run-script: 17 | name: Run Bash Script 18 | strategy: 19 | matrix: 20 | # Fedora is not an option yet 21 | os: [ubuntu-latest, ubuntu-22.04] 22 | runs-on: ${{ matrix.os }} 23 | 24 | steps: 25 | - name: Checkout Repository 26 | uses: actions/checkout@v4 27 | 28 | - name: Install Podman on Ubuntu 29 | if: contains(matrix.os, 'ubuntu') 30 | run: | 31 | sudo apt-get update 32 | sudo apt-get install -y podman 33 | 34 | # Currently we do not do MacOSX as it is not free, maybe in the future 35 | # - name: Install Podman on macOS 36 | # if: contains(matrix.os, 'macos') 37 | # run: | 38 | # brew install podman 39 | # podman machine init 40 | # podman machine start 41 | 42 | - name: Verify Podman Installation 43 | run: podman --version 44 | 45 | - name: Run pattern.sh script 46 | 
run: | 47 | export TARGET_BRANCH=main 48 | ./scripts/pattern-util.sh make validate-origin 49 | -------------------------------------------------------------------------------- /common/scripts/argocd-login.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ## Login to validated patterns argocd instances 4 | 5 | # Detect Argo CD namespaces 6 | ARGOCD_NAMESPACES=$(oc get argoCD -A -o jsonpath='{.items[*].metadata.namespace}') 7 | if [ -z "$ARGOCD_NAMESPACES" ]; then 8 | echo "Error: No Argo CD instances found in the cluster." 9 | exit 1 10 | fi 11 | 12 | # Split the namespaces into an array 13 | NAMESPACES=($ARGOCD_NAMESPACES) 14 | 15 | # Check if there are at least two Argo CD instances 16 | if [ ${#NAMESPACES[@]} -lt 2 ]; then 17 | echo "Error: Less than two Argo CD instances found. Found instances in namespaces: $ARGOCD_NAMESPACES" 18 | exit 1 19 | fi 20 | 21 | 22 | for NAMESPACE in ${NAMESPACES[@]}; do 23 | # get the instance name 24 | ARGOCD_INSTANCE=$(oc get argocd -n "$NAMESPACE" -o jsonpath='{.items[0].metadata.name}') # assume only one per NS 25 | SERVER_URL=$(oc get route "$ARGOCD_INSTANCE"-server -n "$NAMESPACE" -o jsonpath='{.status.ingress[0].host}') 26 | PASSWORD=$(oc get secret "$ARGOCD_INSTANCE"-cluster -n "$NAMESPACE" -o jsonpath='{.data.admin\.password}' | base64 -d) 27 | echo $PASSWORD 28 | argocd login --skip-test-tls --insecure --grpc-web "$SERVER_URL" --username "admin" --password "$PASSWORD" 29 | if [ "$?" -ne 0 ]; then 30 | echo "Login to Argo CD ${SERVER_URL} failed. Exiting." 
31 | exit 1 32 | fi 33 | 34 | done 35 | -------------------------------------------------------------------------------- /charts/hub/trustee/templates/kbs-config-map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kbs-config 5 | namespace: {{ .Values.kbs.deployNS }} 6 | data: 7 | kbs-config.toml: | 8 | [http_server] 9 | sockets = ["0.0.0.0:8080"] 10 | insecure_http = false 11 | private_key = "/etc/https-key/tls.key" 12 | certificate = "/etc/https-cert/tls.crt" 13 | [admin] 14 | insecure_api = true 15 | auth_public_key = "/etc/auth-secret/publicKey" 16 | 17 | [attestation_token] 18 | insecure_key = true 19 | attestation_token_type = "CoCo" 20 | 21 | [attestation_service] 22 | type = "coco_as_builtin" 23 | work_dir = "/opt/confidential-containers/attestation-service" 24 | policy_engine = "opa" 25 | 26 | [attestation_service.attestation_token_broker] 27 | type = "Ear" 28 | policy_dir = "/opt/confidential-containers/attestation-service/policies" 29 | 30 | [attestation_service.attestation_token_config] 31 | duration_min = 5 32 | 33 | [attestation_service.rvps_config] 34 | type = "BuiltIn" 35 | 36 | [attestation_service.rvps_config.storage] 37 | type = "LocalJson" 38 | file_path = "/opt/confidential-containers/rvps/reference-values/reference-values.json" 39 | 40 | [[plugins]] 41 | name = "resource" 42 | type = "LocalFs" 43 | dir_path = "/opt/confidential-containers/kbs/repository" 44 | 45 | [policy_engine] 46 | policy_path = "/opt/confidential-containers/opa/policy.rego" -------------------------------------------------------------------------------- /scripts/gen-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Creating secrets as required" 4 | echo 5 | 6 | COCO_SECRETS_DIR="${HOME}/.coco-pattern" 7 | SECURITY_POLICY_FILE="${COCO_SECRETS_DIR}/security-policy-config.json" 8 | 
SSH_KEY_FILE="${COCO_SECRETS_DIR}/id_rsa" 9 | KBS_PRIVATE_KEY="${COCO_SECRETS_DIR}/kbsPrivateKey" 10 | KBS_PUBLIC_KEY="${COCO_SECRETS_DIR}/kbsPublicKey" 11 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 12 | VALUES_FILE="${HOME}/values-secret-coco-pattern.yaml" 13 | 14 | mkdir -p ${COCO_SECRETS_DIR} 15 | 16 | if [ ! -f "${SECURITY_POLICY_FILE}" ]; then 17 | echo "Creating security policy" 18 | cat > ${SECURITY_POLICY_FILE} < ${KBS_PRIVATE_KEY} 34 | openssl pkey -in "${KBS_PRIVATE_KEY}" -pubout -out "${KBS_PUBLIC_KEY}" 35 | fi 36 | 37 | if [ ! -f "${SSH_KEY_FILE}" ]; then 38 | echo "Creating ssh keys" 39 | rm -f "${SSH_KEY_FILE}.pub" 40 | ssh-keygen -f "${SSH_KEY_FILE}" -N "" 41 | fi 42 | 43 | 44 | ## Copy a sample values file if this stuff doesn't exist 45 | 46 | if [ ! -f "${VALUES_FILE}" ]; then 47 | echo "No values file was found copying template.. please review before deploying" 48 | cp "${SCRIPT_DIR}/../values-secret.yaml.template" "${VALUES_FILE}" 49 | fi -------------------------------------------------------------------------------- /.github/workflows/superlinter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Super linter 3 | 4 | on: [push, pull_request] 5 | 6 | jobs: 7 | build: 8 | # Name the Job 9 | name: Super linter 10 | # Set the agent to run on 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout Code 15 | uses: actions/checkout@v4 16 | with: 17 | # Full git history is needed to get a proper list of changed files within `super-linter` 18 | fetch-depth: 0 19 | 20 | ################################ 21 | # Run Linter against code base # 22 | ################################ 23 | - name: Lint Code Base 24 | uses: super-linter/super-linter/slim@v7 25 | env: 26 | VALIDATE_ALL_CODEBASE: true 27 | DEFAULT_BRANCH: main 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | # These are the validation we disable atm 30 | VALIDATE_ANSIBLE: false 31 | VALIDATE_BASH: false 32 | 
VALIDATE_CHECKOV: false 33 | VALIDATE_JSCPD: false 34 | VALIDATE_JSON_PRETTIER: false 35 | VALIDATE_MARKDOWN_PRETTIER: false 36 | VALIDATE_KUBERNETES_KUBECONFORM: false 37 | VALIDATE_PYTHON_PYLINT: false 38 | VALIDATE_SHELL_SHFMT: false 39 | VALIDATE_YAML: false 40 | VALIDATE_YAML_PRETTIER: false 41 | # VALIDATE_DOCKERFILE_HADOLINT: false 42 | # VALIDATE_MARKDOWN: false 43 | # VALIDATE_NATURAL_LANGUAGE: false 44 | # VALIDATE_TEKTON: false -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/default-routes.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.letsencrypt.enabled }} 2 | --- 3 | apiVersion: operator.openshift.io/v1 4 | kind: IngressController 5 | metadata: 6 | name: default 7 | namespace: openshift-ingress-operator 8 | annotations: 9 | argocd.argoproj.io/sync-options: ServerSideApply=true, Validate=false, SkipDryRunOnMissingResource=true 10 | spec: 11 | routeAdmission: 12 | wildcardPolicy: WildcardsAllowed 13 | defaultCertificate: 14 | name: lets-encrypt-wildcart-cert-tls 15 | # Patch the cluster-wide argocd instance so it uses the ingress tls cert 16 | --- 17 | apiVersion: argoproj.io/v1alpha1 18 | kind: ArgoCD 19 | metadata: 20 | name: openshift-gitops 21 | namespace: openshift-gitops 22 | annotations: 23 | argocd.argoproj.io/sync-options: ServerSideApply=true, Validate=false, SkipDryRunOnMissingResource=true 24 | spec: 25 | server: 26 | route: 27 | enabled: true 28 | tls: 29 | termination: reencrypt 30 | {{ if .Values.letsencrypt.api_endpoint }} 31 | --- 32 | apiVersion: config.openshift.io/v1 33 | kind: APIServer 34 | metadata: 35 | name: cluster 36 | annotations: 37 | argocd.argoproj.io/sync-options: ServerSideApply=true, Validate=false, SkipDryRunOnMissingResource=true 38 | spec: 39 | servingCerts: 40 | namedCertificates: 41 | - names: 42 | - api.{{ $.Values.global.localClusterDomain | replace "apps." 
"" }} 43 | servingCertificate: 44 | name: api-validated-patterns-letsencrypt-cert 45 | {{- end }} 46 | {{- end }} 47 | -------------------------------------------------------------------------------- /common/.github/workflows/superlinter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Super linter 3 | 4 | on: [push, pull_request] 5 | 6 | jobs: 7 | build: 8 | # Name the Job 9 | name: Super linter 10 | # Set the agent to run on 11 | runs-on: ubuntu-latest 12 | 13 | steps: 14 | - name: Checkout Code 15 | uses: actions/checkout@v4 16 | with: 17 | # Full git history is needed to get a proper list of changed files within `super-linter` 18 | fetch-depth: 0 19 | 20 | ################################ 21 | # Run Linter against code base # 22 | ################################ 23 | - name: Lint Code Base 24 | uses: super-linter/super-linter/slim@v7 25 | env: 26 | VALIDATE_ALL_CODEBASE: true 27 | DEFAULT_BRANCH: main 28 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 29 | # These are the validation we disable atm 30 | VALIDATE_ANSIBLE: false 31 | VALIDATE_BASH: false 32 | VALIDATE_CHECKOV: false 33 | VALIDATE_JSCPD: false 34 | VALIDATE_JSON_PRETTIER: false 35 | VALIDATE_MARKDOWN_PRETTIER: false 36 | VALIDATE_KUBERNETES_KUBECONFORM: false 37 | VALIDATE_PYTHON_PYLINT: false 38 | VALIDATE_SHELL_SHFMT: false 39 | VALIDATE_YAML: false 40 | VALIDATE_YAML_PRETTIER: false 41 | # VALIDATE_DOCKERFILE_HADOLINT: false 42 | # VALIDATE_MARKDOWN: false 43 | # VALIDATE_NATURAL_LANGUAGE: false 44 | # VALIDATE_TEKTON: false 45 | -------------------------------------------------------------------------------- /charts/coco-supported/hello-openshift/insecure-policy.rego: -------------------------------------------------------------------------------- 1 | package agent_policy 2 | 3 | default AddARPNeighborsRequest := true 4 | default AddSwapRequest := true 5 | default CloseStdinRequest := true 6 | default CopyFileRequest := true 7 | default 
CreateContainerRequest := true 8 | default CreateSandboxRequest := true 9 | default DestroySandboxRequest := true 10 | default ExecProcessRequest := true 11 | default GetMetricsRequest := true 12 | default GetOOMEventRequest := true 13 | default GuestDetailsRequest := true 14 | default ListInterfacesRequest := true 15 | default ListRoutesRequest := true 16 | default MemHotplugByProbeRequest := true 17 | default OnlineCPUMemRequest := true 18 | default PauseContainerRequest := true 19 | default PullImageRequest := true 20 | default ReadStreamRequest := true 21 | default RemoveContainerRequest := true 22 | default RemoveStaleVirtiofsShareMountsRequest := true 23 | default ReseedRandomDevRequest := true 24 | default ResumeContainerRequest := true 25 | default SetGuestDateTimeRequest := true 26 | default SetPolicyRequest := true 27 | default SignalProcessRequest := true 28 | default StartContainerRequest := true 29 | default StartTracingRequest := true 30 | default StatsContainerRequest := true 31 | default StopTracingRequest := true 32 | default TtyWinResizeRequest := true 33 | default UpdateContainerRequest := true 34 | default UpdateEphemeralMountsRequest := true 35 | default UpdateInterfaceRequest := true 36 | default UpdateRoutesRequest := true 37 | default WaitProcessRequest := true 38 | default WriteStreamRequest := true -------------------------------------------------------------------------------- /values-secret.yaml.template: -------------------------------------------------------------------------------- 1 | # A more formal description of this format can be found here: 2 | # https://github.com/hybrid-cloud-patterns/common/tree/main/ansible/roles/vault_utils#values-secret-file-format 3 | 4 | version: "2.0" 5 | # Ideally you NEVER COMMIT THESE VALUES TO GIT (although if all passwords are 6 | # automatically generated inside the vault this should not really matter) 7 | 8 | secrets: 9 | - name: 'sshKey' 10 | vaultPrefixes: 11 | - global 12 | fields: 13 | - 
name: id_rsa.pub 14 | path: ~/.coco-pattern/id_rsa.pub 15 | - name: id_rsa 16 | path: ~/.coco-pattern/id_rsa 17 | 18 | - name: 'securityPolicyConfig' 19 | vaultPrefixes: 20 | - hub 21 | fields: 22 | - name: osc 23 | path: ~/.coco-pattern/security-policy-config.json 24 | 25 | - name: kbsPublicKey 26 | vaultPrefixes: 27 | - hub 28 | fields: 29 | - name: publicKey 30 | path: ~/.coco-pattern/kbsPublicKey 31 | 32 | - name: kbsPrivateKey 33 | vaultPrefixes: 34 | - global 35 | fields: 36 | - name: privateKey 37 | path: ~/.coco-pattern/kbsPrivateKey 38 | 39 | - name: kbsres1 40 | vaultPrefixes: 41 | - hub 42 | fields: 43 | - name: key1 44 | value: '' 45 | onMissingValue: generate 46 | vaultPolicy: validatedPatternDefaultPolicy 47 | - name: key2 48 | value: '' 49 | onMissingValue: generate 50 | vaultPolicy: validatedPatternDefaultPolicy 51 | - name: key3 52 | value: '' 53 | onMissingValue: generate 54 | vaultPolicy: validatedPatternDefaultPolicy 55 | 56 | - name: passphrase 57 | vaultPrefixes: 58 | - hub 59 | fields: 60 | - name: passphrase 61 | value: '' 62 | onMissingValue: generate 63 | vaultPolicy: validatedPatternDefaultPolicy 64 | -------------------------------------------------------------------------------- /common/scripts/manage-secret-app.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | APP=$1 4 | STATE=$2 5 | 6 | MAIN_CLUSTERGROUP_FILE="./values-$(common/scripts/determine-main-clustergroup.sh).yaml" 7 | MAIN_CLUSTERGROUP_PROJECT="$(common/scripts/determine-main-clustergroup.sh)" 8 | 9 | case "$APP" in 10 | "vault") 11 | APP_NAME="vault" 12 | NAMESPACE="vault" 13 | PROJECT="$MAIN_CLUSTERGROUP_PROJECT" 14 | CHART_NAME="hashicorp-vault" 15 | CHART_VERSION=0.1.* 16 | 17 | ;; 18 | "golang-external-secrets") 19 | APP_NAME="golang-external-secrets" 20 | NAMESPACE="golang-external-secrets" 21 | PROJECT="$MAIN_CLUSTERGROUP_PROJECT" 22 | CHART_NAME="golang-external-secrets" 23 | CHART_VERSION=0.1.* 24 | 25 | ;; 26 
| *) 27 | echo "Error - cannot manage $APP can only manage vault and golang-external-secrets" 28 | exit 1 29 | ;; 30 | esac 31 | 32 | case "$STATE" in 33 | "present") 34 | common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE" 35 | 36 | RES=$(yq ".clusterGroup.applications[] | select(.path == \"$CHART_LOCATION\")" "$MAIN_CLUSTERGROUP_FILE" 2>/dev/null) 37 | if [ -z "$RES" ]; then 38 | echo "Application with chart location $CHART_LOCATION not found, adding" 39 | yq -i ".clusterGroup.applications.$APP_NAME = { \"name\": \"$APP_NAME\", \"namespace\": \"$NAMESPACE\", \"project\": \"$PROJECT\", \"chart\": \"$CHART_NAME\", \"chartVersion\": \"$CHART_VERSION\"}" "$MAIN_CLUSTERGROUP_FILE" 40 | fi 41 | ;; 42 | "absent") 43 | common/scripts/manage-secret-namespace.sh "$NAMESPACE" "$STATE" 44 | echo "Removing application wth chart location $CHART_LOCATION" 45 | yq -i "del(.clusterGroup.applications[] | select(.chart == \"$CHART_NAME\"))" "$MAIN_CLUSTERGROUP_FILE" 46 | ;; 47 | *) 48 | echo "$STATE not supported" 49 | exit 1 50 | ;; 51 | esac 52 | 53 | exit 0 54 | -------------------------------------------------------------------------------- /.github/workflows/jsonschema.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Verify json schema 3 | 4 | on: [push, pull_request] 5 | 6 | jobs: 7 | jsonschema_tests: 8 | name: Json Schema tests 9 | strategy: 10 | matrix: 11 | python-version: [3.11] 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - name: Checkout Code 16 | uses: actions/checkout@v4 17 | 18 | - name: Set up Python ${{ matrix.python-version }} 19 | uses: actions/setup-python@v5 20 | with: 21 | python-version: ${{ matrix.python-version }} 22 | 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | pip install check-jsonschema 27 | 28 | - name: Install yq 29 | uses: chrisdickinson/setup-yq@latest 30 | with: 31 | yq-version: v4.30.7 32 | 33 | - name: Verify secrets json 
schema against templates 34 | run: | 35 | cp ./values-secret.yaml.template ./values-secret.yaml 36 | check-jsonschema --fill-defaults --schemafile https://raw.githubusercontent.com/validatedpatterns/rhvp.cluster_utils/refs/heads/main/roles/vault_utils/values-secrets.v2.schema.json values-secret.yaml 37 | rm -f ./values-secret.yaml 38 | 39 | - name: Verify ClusterGroup values.schema.json against values-*yaml files 40 | run: | 41 | set -e 42 | find . -maxdepth 1 -type f -name "values-*.yaml" ! -name "values-global.yaml" -print0 | while IFS= read -r -d '' i; 43 | do 44 | echo "$i" 45 | # disable shellcheck of single quotes in yq 46 | # shellcheck disable=2016 47 | yq eval-all '. as $item ireduce ({}; . * $item )' values-global.yaml "$i" > tmp.yaml 48 | check-jsonschema --fill-defaults --schemafile https://raw.githubusercontent.com/validatedpatterns/clustergroup-chart/refs/heads/main/values.schema.json tmp.yaml 49 | rm -f tmp.yaml 50 | done -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/acm-secret-create.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.letsencrypt.enabled }} 2 | {{ if and (eq .Values.global.clusterPlatform "Azure") .Values.letsencrypt.cloudProviderDNS }} 3 | --- 4 | ## USE ACM policies to enforce the creation of a lets-encrypt cert 5 | apiVersion: policy.open-cluster-management.io/v1 6 | kind: Policy 7 | metadata: 8 | name: azure-secret-policy 9 | spec: 10 | remediationAction: enforce 11 | disabled: false 12 | policy-templates: 13 | - objectDefinition: 14 | apiVersion: policy.open-cluster-management.io/v1 15 | kind: ConfigurationPolicy 16 | metadata: 17 | name: azure-client-creds 18 | spec: 19 | remediationAction: enforce 20 | severity: medium 21 | object-templates: 22 | - complianceType: mustonlyhave 23 | objectDefinition: 24 | apiVersion: v1 25 | type: Opaque 26 | kind: Secret 27 | metadata: 28 | name: azuredns-config 29 | 
namespace: cert-manager 30 | data: 31 | client-secret: '{{ `{{ fromSecret "openshift-cloud-controller-manager" "azure-cloud-credentials" "azure_client_secret" }}` }}' 32 | --- 33 | apiVersion: policy.open-cluster-management.io/v1 34 | kind: PlacementBinding 35 | metadata: 36 | name: azure-secret-placement-binding 37 | annotations: 38 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 39 | placementRef: 40 | name: azure-managed-clusters-placement-rule 41 | kind: PlacementRule 42 | apiGroup: apps.open-cluster-management.io 43 | subjects: 44 | - name: azure-secret-policy 45 | kind: Policy 46 | apiGroup: policy.open-cluster-management.io 47 | ------ 48 | apiVersion: apps.open-cluster-management.io/v1 49 | kind: PlacementRule 50 | metadata: 51 | name: azure-managed-clusters-placement-rule 52 | spec: 53 | clusterConditions: 54 | - status: 'True' 55 | type: ManagedClusterConditionAvailable 56 | clusterSelector: 57 | matchLabels: 58 | cloud: Azure 59 | 60 | --- 61 | {{- end }} 62 | {{- end }} -------------------------------------------------------------------------------- /ansible/configure-issuer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Retrieve Credentials for AAP on OpenShift 3 | become: false 4 | connection: local 5 | hosts: localhost 6 | gather_facts: false 7 | vars: 8 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 9 | tasks: 10 | - name: Get Azure credentials 11 | kubernetes.core.k8s_info: 12 | kind: Secret 13 | namespace: openshift-cloud-controller-manager 14 | name: azure-cloud-credentials 15 | register: azure_credentials 16 | retries: 20 17 | delay: 5 18 | - name: List DNS zones 19 | azure.azcollection.azure_rm_dnszone_info: 20 | auth_source: "auto" 21 | subscription_id: "{{ azure_credentials.resources[0]['data']['azure_subscription_id'] | b64decode }}" 22 | client_id: "{{ azure_credentials.resources[0]['data']['azure_client_id'] | b64decode }}" 23 | secret: "{{ 
azure_credentials.resources[0]['data']['azure_client_secret'] | b64decode }}" 24 | tenant: "{{ azure_credentials.resources[0]['data']['azure_tenant_id'] | b64decode }}" 25 | register: dns_zones 26 | # FIXME: This assumes only one dns zone is present. we should be matching against available dns zones. 27 | - name: Split the Path 28 | ansible.builtin.set_fact: 29 | path_parts: "{{ dns_zones.ansible_info.azure_dnszones[0].id.split('/') }}" 30 | - name: Find the Resource Group Name 31 | ansible.builtin.set_fact: 32 | resource_group: "{{ path_parts[4] }}" 33 | - name: Get hosted zone 34 | ansible.builtin.set_fact: 35 | hosted_zone: "{{ dns_zones.ansible_info.azure_dnszones[0].name }}" 36 | - name: "Set k8s cm" 37 | kubernetes.core.k8s: 38 | api_version: v1 39 | kind: ConfigMap 40 | resource_definition: 41 | apiVersion: v1 42 | kind: ConfigMap 43 | metadata: 44 | name: dnsinfo 45 | namespace: imperative 46 | data: 47 | resource_group: "{{ resource_group }}" 48 | hosted_zone: "{{ hosted_zone }}" 49 | state: present 50 | -------------------------------------------------------------------------------- /common/README.md: -------------------------------------------------------------------------------- 1 | # Validated Patterns common/ repository 2 | 3 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) 4 | 5 | ## Note 6 | 7 | This is the `main` branch of common and it assumes that the pattern is fully 8 | multisource (meaning that any used charts from VP is actually referenced from 9 | either a helm chart repository or quay repository). I.e. there are no helm 10 | charts contained in this branch of common and there is no ansible code neither. 11 | 12 | The helm charts now live in separate repositories under the VP 13 | [organization](https://github.com/validatedpatterns) on GitHub. 
The repositories are: 14 | 15 | - clustergroup-chart 16 | - pattern-install-chart 17 | - hashicorp-vault-chart 18 | - golang-external-secrets-chart 19 | - acm-chart 20 | - letsencrypt-chart 21 | 22 | The ansible bits live in this [repository](https://github.com/validatedpatterns/rhvp.cluster_utils) 23 | 24 | In order to be able to use this "slimmed-down" main branch of common you *must* 25 | use a 0.9.* clustergroup-chart that. Add the following to your `values-global.yaml`: 26 | 27 | ```yaml 28 | main: 29 | multiSourceConfig: 30 | enabled: true 31 | clusterGroupChartVersion: 0.9.* 32 | ``` 33 | 34 | ## Start Here 35 | 36 | This repository is never used as standalone. It is usually imported in each pattern as a subtree. 37 | In order to import the common subtree the very first time you can use the script 38 | [make_common_subtree.sh](scripts/make-common-subtree.sh). 39 | 40 | In order to update your common subtree inside your pattern repository you can either use 41 | `https://github.com/validatedpatterns/utilities/blob/main/scripts/update-common-everywhere.sh` or 42 | do it manually with the following commands: 43 | 44 | ```sh 45 | git remote add -f common-upstream https://github.com/validatedpatterns/common.git 46 | git merge -s subtree -Xtheirs -Xsubtree=common common-upstream/main 47 | ``` 48 | 49 | ## Secrets 50 | 51 | There are two different secret formats parsed by the ansible bits. Both are documented [here](https://github.com/validatedpatterns/common/tree/main/ansible/roles/vault_utils/README.md) 52 | -------------------------------------------------------------------------------- /common/scripts/make-common-subtree.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "$1" = "-h" ]; then 4 | echo "This script will convert common into a subtree and add a remote to help manage it." 
5 | echo "The script takes three positional arguments, as follows:" 6 | echo 7 | echo "$0 " 8 | echo 9 | echo "Run without arguments, the script would run as if these arguments had been passed:" 10 | echo "$0 https://github.com/validatedpatterns/common.git main common-upstream" 11 | echo 12 | echo "Please ensure the git subtree command is available. On RHEL/Fedora, the git subtree command" 13 | echo "is in a separate package called git-subtree" 14 | exit 1 15 | fi 16 | 17 | if [ -f '/etc/redhat-release' ]; then 18 | rpm -qa | grep git-subtree 2>&1 19 | if [ ! $? = 0 ]; then 20 | echo "you need to install git-subtree" 21 | echo "would you like to install it now?" 22 | select ANS in yes no 23 | do 24 | case $ANS in 25 | yes) 26 | sudo dnf install git-subtree -y 27 | break 28 | ;; 29 | no) 30 | exit 31 | break 32 | ;; 33 | *) 34 | echo "You must enter yes or no" 35 | ;; 36 | esac 37 | done 38 | fi 39 | fi 40 | 41 | if [ "$1" ]; then 42 | subtree_repo=$1 43 | else 44 | subtree_repo=https://github.com/validatedpatterns/common.git 45 | fi 46 | 47 | if [ "$2" ]; then 48 | subtree_branch=$2 49 | else 50 | subtree_branch=main 51 | fi 52 | 53 | if [ "$3" ]; then 54 | subtree_remote=$3 55 | else 56 | subtree_remote=common-upstream 57 | fi 58 | 59 | git diff --quiet || (echo "This script must be run on a clean working tree" && exit 1) 60 | 61 | echo "Changing directory to project root" 62 | cd `git rev-parse --show-toplevel` 63 | 64 | echo "Removing existing common and replacing it with subtree from $subtree_repo $subtree_remote" 65 | rm -rf common 66 | 67 | echo "Committing removal of common" 68 | (git add -A :/ && git commit -m "Removed previous version of common to convert to subtree from $subtree_repo $subtree_branch") || exit 1 69 | 70 | echo "Adding (possibly replacing) subtree remote $subtree_remote" 71 | git remote rm "$subtree_remote" 72 | git remote add -f "$subtree_remote" "$subtree_repo" || exit 1 73 | git subtree add --prefix=common "$subtree_remote" 
"$subtree_branch" || exit 1 74 | 75 | echo "Complete. You may now push these results if you are satisfied" 76 | exit 0 77 | -------------------------------------------------------------------------------- /ansible/init-data-gzipper.yaml: -------------------------------------------------------------------------------- 1 | - name: Gzip initdata 2 | become: false 3 | connection: local 4 | hosts: localhost 5 | gather_facts: false 6 | vars: 7 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 8 | cluster_platform: "{{ global.clusterPlatform | default('none') | lower }}" 9 | hub_domain: "{{ global.hubClusterDomain | default('none') | lower}}" 10 | template_src: "initdata-default.toml.tpl" 11 | tasks: 12 | - name: Create temporary working directory 13 | ansible.builtin.tempfile: 14 | state: directory 15 | suffix: initdata 16 | register: tmpdir 17 | - name: Read KBS TLS secret from Kubernetes 18 | kubernetes.core.k8s_info: 19 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 20 | api_version: v1 21 | kind: Secret 22 | name: kbs-tls-self-signed 23 | namespace: imperative 24 | register: kbs_secret_result 25 | 26 | - name: Extract and decode certificate from secret 27 | ansible.builtin.set_fact: 28 | trustee_cert: "{{ kbs_secret_result.resources[0].data['tls.crt'] | b64decode }}" 29 | when: kbs_secret_result.resources | length > 0 30 | 31 | - name: Fail if certificate not found 32 | ansible.builtin.fail: 33 | msg: "KBS TLS certificate not found in secret 'kbs-tls-self-signed' in namespace 'imperative'" 34 | when: kbs_secret_result.resources | length == 0 35 | 36 | - name: Define temp file paths 37 | ansible.builtin.set_fact: 38 | rendered_path: "{{ tmpdir.path }}/rendered.toml" 39 | gz_path: "{{ tmpdir.path }}/rendered.toml.gz" 40 | 41 | - name: Render template to temp file 42 | ansible.builtin.template: 43 | src: "{{ template_src }}" 44 | dest: "{{ rendered_path }}" 45 | mode: "0600" 46 | 47 | 48 | - name: Gzip the rendered content 49 | ansible.builtin.shell: | 50 | gzip -c 
"{{ rendered_path }}" > "{{ gz_path }}" 51 | changed_when: true 52 | 53 | - name: Read gzip as base64 54 | ansible.builtin.slurp: 55 | path: "{{ gz_path }}" 56 | register: gz_slurped 57 | 58 | - name: Create/update ConfigMap with gzipped+base64 content 59 | kubernetes.core.k8s: 60 | kubeconfig: "{{ kubeconfig | default(omit) }}" 61 | state: present 62 | definition: 63 | apiVersion: v1 64 | kind: ConfigMap 65 | metadata: 66 | name: "initdata" 67 | namespace: "imperative" 68 | data: 69 | INITDATA: "{{ gz_slurped.content }}" 70 | -------------------------------------------------------------------------------- /rhdp/wrapper.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/usr/bin/env bash 3 | set -e 4 | 5 | if [ "$#" -ne 1 ]; then 6 | echo "Error: Exactly one argument is required." 7 | echo "Usage: $0 {azure-region-code}" 8 | echo "Example: $0 eastasia" 9 | exit 1 10 | fi 11 | AZUREREGION=$1 12 | 13 | echo "Run from the root directory of the project" 14 | echo "\n" 15 | echo "Ensuring azure environment is installed" 16 | 17 | if [ ! -n "${GUID}" ]; then 18 | echo "RHDP GUID environmental variable does not exist" 19 | exit 1 20 | fi 21 | if [ ! -n "${CLIENT_ID}" ]; then 22 | echo "RHDP AZURE 'CLIENT_ID' environmental variable does not exist" 23 | exit 1 24 | fi 25 | if [ ! -n "${PASSWORD}" ]; then 26 | echo "RHDP AZURE 'PASSWORD' environmental variable aka client secret does not exist" 27 | exit 1 28 | fi 29 | if [ ! -n "${TENANT}" ]; then 30 | echo "RHDP AZURE 'TENANT' environmental variable does not exist" 31 | exit 1 32 | fi 33 | if [ ! -n "${SUBSCRIPTION}" ]; then 34 | echo "RHDP AZURE 'SUBSCRIPTION' environmental variable does not exist" 35 | exit 1 36 | fi 37 | if [ ! 
-n "${RESOURCEGROUP}" ]; then 38 | echo "RHDP AZURE 'RESOURCEGROUP' environmental variable does not exist" 39 | exit 1 40 | fi 41 | 42 | 43 | sleep 10 44 | echo "---------------------" 45 | echo "Installing python dependencies" 46 | echo "---------------------" 47 | pip install -r rhdp/requirements.txt 48 | echo "---------------------" 49 | echo "requirements installed" 50 | echo "---------------------" 51 | sleep 5 52 | 53 | if [ ! -f "${HOME}/pull-secret.json" ]; then 54 | echo "A OpenShift pull secret is required at ~/pull-secret.json" 55 | exit 1 56 | fi 57 | 58 | if [ ! -f "${HOME}/.ssh/id_rsa" ]; then 59 | echo "An rsa ssh key is required at ~/.ssh/id_rsa" 60 | echo "e.g. ssh-keygen -t rsa -b 4096" 61 | echo "TBC: Update to support other key types" 62 | exit 1 63 | fi 64 | 65 | 66 | echo "---------------------" 67 | echo "defining cluster" 68 | echo "---------------------" 69 | python rhdp/rhdp-cluster-define.py ${AZUREREGION} 70 | echo "---------------------" 71 | echo "cluster defined" 72 | echo "---------------------" 73 | sleep 10 74 | echo "---------------------" 75 | echo "openshift-install" 76 | echo "---------------------" 77 | openshift-install create cluster --dir=./openshift-install 78 | echo "openshift-install done" 79 | echo "---------------------" 80 | echo "setting up secrets" 81 | 82 | bash ./scripts/gen-secrets.sh 83 | 84 | 85 | sleep 60 86 | echo "---------------------" 87 | echo "pattern install" 88 | echo "---------------------" 89 | export KUBECONFIG=`pwd`/openshift-install/auth/kubeconfig 90 | 91 | 92 | ./pattern.sh make install 93 | echo "---------------------" 94 | echo "pattern install done" 95 | echo "---------------------" 96 | 97 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/values.yaml: -------------------------------------------------------------------------------- 1 | # -- Dictionary of the global settings to configure this chart 2 | # @default -- depends on the 
individual settings 3 | global: 4 | ## -- String containing the domain including the apps. prefix. Gets set by the Validated Pattern framework 5 | localClusterDomain: "apps.example.com" 6 | 7 | 8 | # -- This section contains all the parameters for the letsencrypt chart in 9 | # order to request CA signed certificates in a Validated Pattern By default if 10 | # you include this chart you enable the letsencrypt charts on both the 11 | # *.apps. ingress and on the API endpoint 12 | # @default -- depends on the individual settings 13 | letsencrypt: 14 | ## Override this if you are using a supported 3rd party DNS provider (e.g. cloudflare) 15 | cloudProviderDNS: true 16 | ## -- Boolean to enable this feature and request a wildcard cert for the default Infress (*.apps.domain) (defaults to True) 17 | enabled: true 18 | ## -- Boolean to enable letsencrypt certs on the API endpoint too (defaults to True) 19 | api_endpoint: true 20 | 21 | # -- String that defines the region used by the route53/dns01 resolver in cert-manager (required) 22 | region: eu-central-1 23 | # -- String containing the email used when requesting certificates to letsencrypt (required) 24 | # These two lines need tweaking for every deployment. @example.com emails 25 | # will be rejected by letsencrypt 26 | email: chbutler@redhat.com 27 | 28 | # -- String containing the letsencrypt ACME URL (Defaults to the staging server) 29 | # By default we use the staging URL to avoid any ratelimiting while testing 30 | # To switch to the production certificates signed by a recognized CA, please 31 | # switch to the non-staging URL (see values.yaml) 32 | #server: https://acme-staging-v02.api.letsencrypt.org/directory 33 | server: https://acme-v02.api.letsencrypt.org/directory 34 | 35 | # -- List of organization names to be put in a certificate (Defaults to [hybrid-cloud-patterns.io]) 36 | organizations: 37 | - hybrid-cloud-patterns.io 38 | # -- List of certificate uses. 
See API cert-manager.io/v1.KeyUsage (Defaults to [server auth]) 39 | usages: 40 | - server auth 41 | 42 | # -- Duration of the requested letsencrypt certificates (Defaults to 168h0m0s) 43 | duration: "168h0m0s" 44 | # -- How long before expiration date should the certs be renewed (Defaults to 28h0m0s) 45 | renewBefore: "28h0m0s" 46 | 47 | # -- List of DNS server (ip:port strings) to be used when doing DNS01 challenges (Defaults to [8.8.8.8:53, 1.1.1.1:53]) 48 | # These two are needed because the DNS01 ACME solver needs outside DNS 49 | # servers and won't really work with openshift's internal split-view DNS servers 50 | # https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check 51 | nameservers: 52 | - 8.8.8.8:53 53 | - 1.1.1.1:53 54 | 55 | azure: 56 | secretStoreKey: 'secret/data/global/azure' 57 | 58 | 59 | secretStore: 60 | name: vault-backend 61 | kind: ClusterSecretStore 62 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/templates/issuer-acm.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.letsencrypt.enabled }} 2 | {{ if and (eq .Values.global.clusterPlatform "Azure") .Values.letsencrypt.cloudProviderDNS }} 3 | --- 4 | apiVersion: policy.open-cluster-management.io/v1 5 | kind: Policy 6 | metadata: 7 | name: azure-cluster-issuer-policy 8 | spec: 9 | remediationAction: enforce 10 | disabled: false 11 | policy-templates: 12 | - objectDefinition: 13 | apiVersion: policy.open-cluster-management.io/v1 14 | kind: ConfigurationPolicy 15 | metadata: 16 | name: azure-cluster-issuer 17 | spec: 18 | remediationAction: enforce 19 | severity: medium 20 | object-templates: 21 | - complianceType: mustonlyhave 22 | objectDefinition: 23 | apiVersion: cert-manager.io/v1 24 | kind: ClusterIssuer 25 | metadata: 26 | name: validated-patterns-issuer 27 | spec: 28 | acme: 29 | server: {{ .Values.letsencrypt.server }} 30 | email: {{ 
.Values.letsencrypt.email }} 31 | privateKeySecretRef: 32 | name: validated-patterns-issuer-account-key 33 | solvers: 34 | - dns01: 35 | azureDNS: 36 | # This info is also available in CM's however it's easier to get from the secret 37 | clientID: '{{ `{{ fromSecret "openshift-cloud-controller-manager" "azure-cloud-credentials" "azure_client_id" | base64dec }}` }}' 38 | clientSecretSecretRef: 39 | # The following is the secret we created in Kubernetes. Issuer will use this to present challenge to Azure DNS. 40 | name: azuredns-config 41 | key: client-secret 42 | subscriptionID: '{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).subscriptionId }}` }}' 43 | tenantID: '{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).tenantId }}` }}' 44 | resourceGroupName: '{{ `{{ fromConfigMap "imperative" "dnsinfo" "resource_group" }}` }}' 45 | hostedZoneName: '{{ `{{ fromConfigMap "imperative" "dnsinfo" "hosted_zone" }}` }}' 46 | # Azure Cloud Environment, default to AzurePublicCloud 47 | environment: AzurePublicCloud 48 | --- 49 | apiVersion: policy.open-cluster-management.io/v1 50 | kind: PlacementBinding 51 | metadata: 52 | name: azure-issuer-placement-binding 53 | annotations: 54 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 55 | placementRef: 56 | name: azure-issuer-placement-rule 57 | kind: PlacementRule 58 | apiGroup: apps.open-cluster-management.io 59 | subjects: 60 | - name: azure-cluster-issuer-policy 61 | kind: Policy 62 | apiGroup: policy.open-cluster-management.io 63 | --- 64 | apiVersion: apps.open-cluster-management.io/v1 65 | kind: PlacementRule 66 | metadata: 67 | name: azure-issuer-placement-rule 68 | spec: 69 | clusterConditions: 70 | - status: 'True' 71 | type: ManagedClusterConditionAvailable 72 | clusterSelector: 73 | matchLabels: 74 | cloud: Azure 75 | --- 76 | {{- end }} 77 | {{- end }} 
# SPDX-FileCopyrightText: 2024-present Red Hat Inc
#
# SPDX-License-Identifier: Apache-2.0
"""Prepare an OpenShift installation on an RHDP-provisioned Azure account.

Renders ``install-config.yaml`` from the Jinja2 template in ``rhdp/``,
resets the ``openshift-install`` working directory, and writes the Azure
service-principal credentials file the installer expects.
"""
import json
import os
import pathlib
import shutil

import typer
from jinja2 import Environment, FileSystemLoader, select_autoescape
from rich import print as rprint
from typing_extensions import Annotated


def cleanup(pattern_dir: pathlib.Path) -> None:
    """Reset the openshift-install work dir and remove stale ~/.azure state.

    :param pattern_dir: top-level directory of the pattern checkout.
    """
    install_dir = pattern_dir / "openshift-install"
    azure_dir = pathlib.Path.home() / ".azure"

    if install_dir.exists() and install_dir.is_dir():
        shutil.rmtree(install_dir)
    # Recreate unconditionally: setup_install() writes install-config.yaml
    # into this directory, so it must exist even on a first run (previously
    # mkdir() only ran when the directory had already existed).
    install_dir.mkdir(parents=True, exist_ok=True)
    if azure_dir.exists() and azure_dir.is_dir():
        shutil.rmtree(azure_dir)


def validate_dir() -> None:
    """Sanity-check that we run from the root of a pattern checkout."""
    for required in ("values-global.yaml", "values-simple.yaml"):
        assert pathlib.Path(required).exists(), (
            f"{required} not found; run from the pattern top-level directory"
        )


def setup_install(
    pattern_dir: pathlib.Path,
    region: str,
    pull_secret_path: pathlib.Path,
    ssh_key_path: pathlib.Path,
) -> None:
    """Render install-config.yaml for the requested Azure region.

    :param pattern_dir: top-level directory of the pattern checkout.
    :param region: Azure region code passed through to the template.
    :param pull_secret_path: path to the OpenShift pull secret (may use ~).
    :param ssh_key_path: path to the public SSH key (may use ~).
    :raises KeyError: when GUID or RESOURCEGROUP is missing from the env.
    """
    try:
        # Both variables are injected by the RHDP environment.
        GUID = os.environ["GUID"]
        RESOURCEGROUP = os.environ["RESOURCEGROUP"]
    except KeyError:
        rprint("Unable to get azure environment details")
        raise
    ssh_key = ssh_key_path.expanduser().read_text()
    pull_secret = pull_secret_path.expanduser().read_text()
    rhdp_dir = pattern_dir / "rhdp"
    jinja_env = Environment(
        loader=FileSystemLoader(searchpath=rhdp_dir), autoescape=select_autoescape()
    )
    config_template = jinja_env.get_template("install-config.yaml.j2")
    output_text = config_template.render(
        GUID=GUID,
        RESOURCEGROUP=RESOURCEGROUP,
        ssh_key=ssh_key,
        pull_secret=pull_secret,
        region=region,
    )
    install_config = pattern_dir / "openshift-install/install-config.yaml"
    install_config.write_text(output_text)


def write_azure_creds() -> None:
    """Write ~/.azure/osServicePrincipal.json from environment variables.

    :raises KeyError: when any of SUBSCRIPTION, CLIENT_ID, PASSWORD or
        TENANT is missing from the environment.
    """
    # Fixed duplicated assignment (`azure_dir = azure_dir = ...`).
    azure_dir = pathlib.Path.home() / ".azure"
    azure_dir.mkdir(exist_ok=True)
    sp_path = azure_dir / "osServicePrincipal.json"

    keymap = {
        "subscriptionId": os.environ["SUBSCRIPTION"],
        "clientId": os.environ["CLIENT_ID"],
        "clientSecret": os.environ["PASSWORD"],
        "tenantId": os.environ["TENANT"],
    }

    with open(sp_path, "w", encoding="utf-8") as file:
        json.dump(keymap, file)


def print_install_hint() -> None:
    """Tell the user what to run next.

    Renamed from ``print`` so the builtin is no longer shadowed at module
    scope (the old name was never called anywhere in this script).
    """
    rprint("Run openshift install .")


def run(region: Annotated[str, typer.Argument(help="Azure region code")]):
    """
    Region flag requires an azure region key which can be (authoritatively)
    requested with: "az account list-locations -o table".
    """
    validate_dir()
    cleanup(pathlib.Path.cwd())
    setup_install(
        pathlib.Path.cwd(),
        region,
        pathlib.Path("~/pull-secret.json"),
        pathlib.Path("~/.ssh/id_rsa.pub"),
    )
    write_azure_creds()


if __name__ == "__main__":
    typer.run(run)
(azure_cloud_conf.resources[0]['data']['cloud.conf'] | from_json)['subnetName'] }}" 39 | coco_public_ip_name: "{{ resource_prefix }}-pip" 40 | coco_nat_gateway_name: "{{ resource_prefix }}-nat-gateway" 41 | no_log: true 42 | 43 | - name: Create Public IP for NAT Gateway 44 | azure.azcollection.azure_rm_publicipaddress: 45 | subscription_id: "{{ azure_subscription_id }}" 46 | tenant: "{{ azure_tenant_id }}" 47 | client_id: "{{ azure_client_id }}" 48 | secret: "{{ azure_client_secret }}" 49 | resource_group: "{{ azure_resource_group }}" 50 | name: "{{ coco_public_ip_name }}" 51 | sku: "standard" 52 | allocation_method: "static" 53 | 54 | - name: Retrieve Public IP for NAT Gateway 55 | azure.azcollection.azure_rm_publicipaddress_info: 56 | subscription_id: "{{ azure_subscription_id }}" 57 | tenant: "{{ azure_tenant_id }}" 58 | client_id: "{{ azure_client_id }}" 59 | secret: "{{ azure_client_secret }}" 60 | resource_group: "{{ azure_resource_group }}" 61 | name: "{{ coco_public_ip_name }}" 62 | register: coco_gw_public_ip 63 | 64 | - name: Create NAT Gateway 65 | azure.azcollection.azure_rm_natgateway: 66 | subscription_id: "{{ azure_subscription_id }}" 67 | tenant: "{{ azure_tenant_id }}" 68 | client_id: "{{ azure_client_id }}" 69 | secret: "{{ azure_client_secret }}" 70 | resource_group: "{{ azure_resource_group }}" 71 | name: "{{ coco_nat_gateway_name }}" 72 | idle_timeout_in_minutes: 10 73 | sku: 74 | name: standard 75 | public_ip_addresses: 76 | - "{{ coco_gw_public_ip.publicipaddresses[0].id }}" 77 | register: coco_natgw 78 | 79 | - name: Update the worker subnet to associate NAT gateway 80 | azure.azcollection.azure_rm_subnet: 81 | subscription_id: "{{ azure_subscription_id }}" 82 | tenant: "{{ azure_tenant_id }}" 83 | client_id: "{{ azure_client_id }}" 84 | secret: "{{ azure_client_secret }}" 85 | resource_group: "{{ azure_resource_group }}" 86 | name: "{{ azure_subnet }}" 87 | virtual_network_name: "{{ azure_vnet }}" 88 | nat_gateway: "{{ coco_nat_gateway_name 
}}" 89 | -------------------------------------------------------------------------------- /charts/coco-supported/sandbox/templates/peer-pods-cm.yaml: -------------------------------------------------------------------------------- 1 | {{- if and .Values.sandbox.azure .Values.sandbox.deploy }} 2 | --- 3 | apiVersion: policy.open-cluster-management.io/v1 4 | kind: Policy 5 | metadata: 6 | name: peerpods-cm-policy 7 | spec: 8 | remediationAction: enforce 9 | disabled: false 10 | policy-templates: 11 | - objectDefinition: 12 | apiVersion: policy.open-cluster-management.io/v1 13 | kind: ConfigurationPolicy 14 | metadata: 15 | name: peerpods-cm-cp 16 | spec: 17 | remediationAction: enforce 18 | severity: medium 19 | object-templates: 20 | 21 | - complianceType: mustonlyhave 22 | objectDefinition: 23 | apiVersion: v1 24 | kind: ConfigMap 25 | metadata: 26 | name: peer-pods-cm 27 | namespace: openshift-sandboxed-containers-operator 28 | data: 29 | CLOUD_PROVIDER: "azure" 30 | VXLAN_PORT: "9000" 31 | AZURE_IMAGE_ID: '{{ `{{if (lookup "v1" "ConfigMap" "openshift-sandboxed-containers-operator" "peer-pods-cm").metadata.name }}{{ fromConfigMap "openshift-sandboxed-containers-operator" "peer-pods-cm" "AZURE_IMAGE_ID" }}{{ else }}{{ end }}` }}' 32 | AZURE_INSTANCE_SIZE: "{{ .Values.global.coco.azure.defaultVMFlavour }}" 33 | AZURE_INSTANCE_SIZES: "Standard_DC2as_v5,Standard_DC4as_v5,Standard_DC8as_v5,Standard_DC16as_v5" 34 | AZURE_RESOURCE_GROUP: '{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).vnetResourceGroup }}` }}' 35 | AZURE_REGION: '{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).location }}` }}' 36 | AZURE_SUBNET_ID: '/subscriptions/{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).subscriptionId }}` }}/resourceGroups/{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" 
"cloud-conf" "cloud.conf" | toLiteral)).vnetResourceGroup }}` }}/providers/Microsoft.Network/virtualNetworks/{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).vnetName }}` }}/subnets/{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).subnetName }}` }}' 37 | AZURE_NSG_ID: '/subscriptions/{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).subscriptionId }}` }}/resourceGroups/{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).resourceGroup }}` }}/providers/Microsoft.Network/networkSecurityGroups/{{ `{{ (fromJson (fromConfigMap "openshift-cloud-controller-manager" "cloud-conf" "cloud.conf" | toLiteral)).securityGroupName }}` }}' 38 | DISABLECVM: "false" 39 | PROXY_TIMEOUT: "5m" 40 | INITDATA: '{{ `{{if (lookup "v1" "ConfigMap" "imperative" "initdata").metadata.name }}{{ fromConfigMap "imperative" "initdata" "INITDATA" }}{{ else }}{{ end }}` }}' 41 | 42 | --- 43 | apiVersion: policy.open-cluster-management.io/v1 44 | kind: PlacementBinding 45 | metadata: 46 | name: peerpods-placement-binding 47 | annotations: 48 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 49 | placementRef: 50 | name: peerpods-placement-rule 51 | kind: PlacementRule 52 | apiGroup: apps.open-cluster-management.io 53 | subjects: 54 | - name: peerpods-cm-policy 55 | kind: Policy 56 | apiGroup: policy.open-cluster-management.io 57 | --- 58 | apiVersion: apps.open-cluster-management.io/v1 59 | kind: PlacementRule 60 | metadata: 61 | name: peerpods-placement-rule 62 | spec: 63 | clusterConditions: 64 | - status: 'True' 65 | type: ManagedClusterConditionAvailable 66 | clusterSelector: 67 | matchLabels: 68 | cloud: Azure 69 | --- 70 | {{- end }} 71 | -------------------------------------------------------------------------------- 
#!/bin/bash
# Run the pattern utility container via podman, bind-mounting $HOME and
# forwarding the environment variables used by the validated-patterns tooling.
# All script arguments are passed through to the container entrypoint.

# Abort with an error on stderr if the given command is not on PATH.
function is_available {
    command -v "$1" >/dev/null 2>&1 || { echo >&2 "$1 is required but it's not installed. Aborting."; exit 1; }
}

# Convert a dotted version string (up to four components) into a zero-padded
# integer so versions can be compared numerically (e.g. 4.3.0 -> 4003000000).
function version {
    echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'
}

if [ -z "$PATTERN_UTILITY_CONTAINER" ]; then
    PATTERN_UTILITY_CONTAINER="quay.io/hybridcloudpatterns/utility-container"
fi
# If PATTERN_DISCONNECTED_HOME is set it will be used to populate both PATTERN_UTILITY_CONTAINER
# and PATTERN_INSTALL_CHART automatically
if [ -n "${PATTERN_DISCONNECTED_HOME}" ]; then
    PATTERN_UTILITY_CONTAINER="${PATTERN_DISCONNECTED_HOME}/utility-container"
    PATTERN_INSTALL_CHART="oci://${PATTERN_DISCONNECTED_HOME}/pattern-install"
    echo "PATTERN_DISCONNECTED_HOME is set to ${PATTERN_DISCONNECTED_HOME}"
    echo "Setting the following variables:"
    echo " PATTERN_UTILITY_CONTAINER: ${PATTERN_UTILITY_CONTAINER}"
    echo " PATTERN_INSTALL_CHART: ${PATTERN_INSTALL_CHART}"
fi

readonly commands=(podman)
# Quote the array expansion so elements are never word-split.
for cmd in "${commands[@]}"; do is_available "$cmd"; done

UNSUPPORTED_PODMAN_VERSIONS="1.6 1.5"
PODMAN_VERSION_STR=$(podman --version)
for i in ${UNSUPPORTED_PODMAN_VERSIONS}; do
    # \b anchors the match so e.g. "11.6" is not rejected as "1.6"
    if echo "${PODMAN_VERSION_STR}" | grep -q -E "\b${i}"; then
        echo "Unsupported podman version. We recommend > 4.3.0"
        podman --version
        exit 1
    fi
done

# podman --version outputs:
# podman version 4.8.2
PODMAN_VERSION=$(echo "${PODMAN_VERSION_STR}" | awk '{ print $NF }')

# podman < 4.3.0 do not support keep-id:uid=...
# (quoted command substitutions so the numeric test never sees split words)
if [ "$(version "${PODMAN_VERSION}")" -lt "$(version "4.3.0")" ]; then
    PODMAN_ARGS="-v ${HOME}:/root"
else
    # We do not rely on bash's $UID and $GID because on MacOSX $GID is not set
    MYNAME=$(id -n -u)
    MYUID=$(id -u)
    MYGID=$(id -g)
    PODMAN_ARGS="--passwd-entry ${MYNAME}:x:${MYUID}:${MYGID}::/pattern-home:/bin/bash --user ${MYUID}:${MYGID} --userns keep-id:uid=${MYUID},gid=${MYGID}"
fi

if [ -n "$KUBECONFIG" ]; then
    # "^$HOME" is a prefix check. The previous "^$HOME*" applied the regex
    # star to the last character of $HOME, making it optional, which is not
    # what was intended.
    if [[ ! "${KUBECONFIG}" =~ ^$HOME ]]; then
        echo "${KUBECONFIG} is pointing outside of the HOME folder, this will make it unavailable from the container."
        echo "Please move it somewhere inside your $HOME folder, as that is what gets bind-mounted inside the container"
        exit 1
    fi
fi

# Detect if we use podman machine. If we do not then we bind mount local host ssl folders
# if we are using podman machine then we do not bind mount anything (for now!)
REMOTE_PODMAN=$(podman system connection list -q | wc -l)
if [ "$REMOTE_PODMAN" -eq 0 ]; then # If we are not using podman machine we check the hosts folders
    # We check /etc/pki/tls because on ubuntu /etc/pki/fwupd sometimes
    # exists but not /etc/pki/tls and we do not want to bind mount in such a case
    # as it would find no certificates at all.
    if [ -d /etc/pki/tls ]; then
        PKI_HOST_MOUNT_ARGS="-v /etc/pki:/etc/pki:ro"
    elif [ -d /etc/ssl ]; then
        PKI_HOST_MOUNT_ARGS="-v /etc/ssl:/etc/ssl:ro"
    else
        PKI_HOST_MOUNT_ARGS="-v /usr/share/ca-certificates:/usr/share/ca-certificates:ro"
    fi
else
    PKI_HOST_MOUNT_ARGS=""
fi

# Copy Kubeconfig from current environment. The utilities will pick up ~/.kube/config if set so it's not mandatory
# $HOME is mounted as itself for any files that are referenced with absolute paths
# $HOME is mounted to /root because the UID in the container is 0 and that's where SSH looks for credentials

# ${PKI_HOST_MOUNT_ARGS}, ${PODMAN_ARGS} and ${EXTRA_ARGS} are deliberately
# unquoted: they hold whitespace-separated lists of extra podman arguments.
# "$@" is quoted so user arguments containing spaces survive intact (the
# previous bare $@ word-split them).
podman run -it --rm --pull=newer \
    --security-opt label=disable \
    -e EXTRA_HELM_OPTS \
    -e EXTRA_PLAYBOOK_OPTS \
    -e TARGET_ORIGIN \
    -e TARGET_SITE \
    -e TARGET_BRANCH \
    -e NAME \
    -e TOKEN_SECRET \
    -e TOKEN_NAMESPACE \
    -e VALUES_SECRET \
    -e KUBECONFIG \
    -e PATTERN_INSTALL_CHART \
    -e PATTERN_DISCONNECTED_HOME \
    -e DISABLE_VALIDATE_ORIGIN \
    -e K8S_AUTH_HOST \
    -e K8S_AUTH_VERIFY_SSL \
    -e K8S_AUTH_SSL_CA_CERT \
    -e K8S_AUTH_USERNAME \
    -e K8S_AUTH_PASSWORD \
    -e K8S_AUTH_TOKEN \
    ${PKI_HOST_MOUNT_ARGS} \
    -v "${HOME}":"${HOME}" \
    -v "${HOME}":/pattern-home \
    ${PODMAN_ARGS} \
    ${EXTRA_ARGS} \
    -w "$(pwd)" \
    "$PATTERN_UTILITY_CONTAINER" \
    "$@"
2 | 3 | clusterGroup: 4 | name: simple 5 | isHubCluster: true 6 | namespaces: 7 | - open-cluster-management 8 | - vault 9 | - golang-external-secrets 10 | - openshift-sandboxed-containers-operator 11 | - trustee-operator-system 12 | - hello-openshift 13 | - cert-manager-operator 14 | - cert-manager 15 | - letsencrypt 16 | - kbs-access 17 | - encrypted-storage 18 | subscriptions: 19 | # ACM is kept anticipating 20 | acm: 21 | name: advanced-cluster-management 22 | namespace: open-cluster-management 23 | channel: release-2.13 24 | sandbox: 25 | name: sandboxed-containers-operator 26 | namespace: openshift-sandboxed-containers-operator 27 | source: redhat-operators 28 | channel: stable 29 | installPlanApproval: Manual 30 | csv: sandboxed-containers-operator.v1.10.1 31 | trustee: 32 | name: trustee-operator 33 | namespace: trustee-operator-system 34 | source: redhat-operators 35 | channel: stable 36 | installPlanApproval: Manual 37 | csv: trustee-operator.v0.4.1 38 | cert-manager: 39 | name: openshift-cert-manager-operator 40 | namespace: cert-manager-operator 41 | channel: stable-v1 42 | 43 | projects: 44 | - hub 45 | - vault 46 | - trustee 47 | - golang-external-secrets 48 | - sandbox 49 | - workloads 50 | - default 51 | # Explicitly mention the cluster-state based overrides we plan to use for this pattern. 
52 | # We can use self-referential variables because the chart calls the tpl function with these variables defined 53 | sharedValueFiles: 54 | - '/overrides/values-{{ $.Values.global.clusterPlatform }}.yaml' 55 | applications: 56 | acm: 57 | name: acm 58 | namespace: open-cluster-management 59 | project: hub 60 | chart: acm 61 | chartVersion: 0.1.* 62 | 63 | vault: 64 | name: vault 65 | namespace: vault 66 | project: vault 67 | chart: hashicorp-vault 68 | chartVersion: 0.1.* 69 | 70 | secrets-operator: 71 | name: golang-external-secrets 72 | namespace: golang-external-secrets 73 | project: golang-external-secrets 74 | chart: golang-external-secrets 75 | chartVersion: 0.1.* 76 | 77 | trustee: 78 | name: trustee 79 | namespace: trustee-operator-system #upstream config 80 | project: trustee 81 | path: charts/hub/trustee 82 | 83 | sandbox: 84 | name: sandbox 85 | namespace: openshift-sandboxed-containers-operator #upstream config 86 | project: sandbox 87 | path: charts/coco-supported/sandbox 88 | 89 | # Letsencrypt is not required anymore for trustee. 90 | # It's only here if you need it for your needs. 91 | letsencrypt: 92 | name: letsencrypt 93 | namespace: letsencrypt 94 | project: hub 95 | path: charts/all/letsencrypt 96 | # Default to 'safe' for ARO 97 | overrides: 98 | - name: letsencrypt.enabled 99 | value: false 100 | hello-openshift: 101 | name: hello-openshift 102 | namespace: hello-openshift 103 | project: workloads 104 | path: charts/coco-supported/hello-openshift 105 | 106 | kbs-access: 107 | name: kbs-access 108 | namespace: kbs-access 109 | project: workloads 110 | path: charts/coco-supported/kbs-access 111 | 112 | imperative: 113 | # NOTE: We *must* use lists and not hashes. 
As hashes lose ordering once parsed by helm 114 | # The default schedule is every 10 minutes: imperative.schedule 115 | # Total timeout of all jobs is 1h: imperative.activeDeadlineSeconds 116 | # imagePullPolicy is set to always: imperative.imagePullPolicy 117 | # For additional overrides that apply to the jobs, please refer to 118 | # https://hybrid-cloud-patterns.io/imperative-actions/#additional-job-customizations 119 | jobs: 120 | - name: install-deps 121 | playbook: ansible/install-deps.yaml 122 | verbosity: -vvv 123 | timeout: 3600 124 | - name: configure-azure-dns 125 | playbook: ansible/configure-issuer.yaml 126 | # this image has not been changes. TBD would make sense 127 | #image: quay.io/hybridcloudpatterns/ansible-edge-gitops-ee:latest 128 | verbosity: -vvv 129 | timeout: 3600 130 | - name: configure-azure-nat-gateway 131 | playbook: ansible/azure-nat-gateway.yaml 132 | verbosity: -vvv 133 | timeout: 3600 134 | - name: gen-certificate 135 | playbook: ansible/gen-certificate.yaml 136 | verbosity: -vvv 137 | timeout: 3600 138 | - name: init-data-gzipper 139 | playbook: ansible/init-data-gzipper.yaml 140 | verbosity: -vvv 141 | timeout: 3600 142 | managedClusterGroups: 143 | exampleRegion: 144 | name: group-one 145 | acmlabels: 146 | - name: clusterGroup 147 | value: group-one 148 | helmOverrides: 149 | - name: clusterGroup.isHubCluster 150 | value: false 151 | -------------------------------------------------------------------------------- /ansible/gen-certificate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Generate self-signed TLS cert for KBS and push to Kubernetes Secret 3 | hosts: localhost 4 | connection: local 5 | become: false 6 | gather_facts: false 7 | vars: 8 | kubeconfig: "{{ lookup('env', 'KUBECONFIG') }}" 9 | hub_domain: "{{ global.hubClusterDomain | default('none') | lower}}" 10 | secret_name: kbs-tls-self-signed 11 | common_name: "kbs-trustee-operator-system.{{ hub_domain }}" 12 
| days_valid: 365 13 | renewal_threshold_days: 10 14 | need_new_cert: false 15 | pre_tasks: 16 | 17 | - name: Check if TLS secret exists 18 | kubernetes.core.k8s_info: 19 | kubeconfig: "{{ kubeconfig }}" 20 | api_version: v1 21 | kind: Secret 22 | name: "{{ secret_name }}" 23 | namespace: "imperative" 24 | register: existing_secret 25 | ignore_errors: true 26 | 27 | - name: Set fact that certificate doesn't exist 28 | ansible.builtin.set_fact: 29 | need_new_cert: true 30 | when: existing_secret.resources | length == 0 31 | 32 | - name: Extract existing certificate if secret exists 33 | ansible.builtin.set_fact: 34 | existing_cert_data: "{{ existing_secret.resources[0].data['tls.crt'] | b64decode }}" 35 | when: existing_secret.resources | length > 0 36 | 37 | - name: Create temporary file for existing certificate analysis 38 | ansible.builtin.tempfile: 39 | state: file 40 | suffix: .crt 41 | register: temp_cert_file 42 | when: existing_secret.resources | length > 0 43 | 44 | - name: Write existing certificate to temp file 45 | ansible.builtin.copy: 46 | content: "{{ existing_cert_data }}" 47 | dest: "{{ temp_cert_file.path }}" 48 | mode: "0600" 49 | when: existing_secret.resources | length > 0 50 | 51 | - name: Get certificate expiry date 52 | community.crypto.x509_certificate_info: 53 | path: "{{ temp_cert_file.path }}" 54 | register: cert_info 55 | when: existing_secret.resources | length > 0 56 | 57 | - name: Calculate days until expiry 58 | ansible.builtin.set_fact: 59 | days_until_expiry: "{{ ((cert_info.not_after | to_datetime('%Y%m%d%H%M%SZ')) - now()).days }}" 60 | when: existing_secret.resources | length > 0 61 | 62 | - name: Set fact to generate new certificate if expiring soon 63 | ansible.builtin.set_fact: 64 | need_new_cert: true 65 | when: 66 | - existing_secret.resources | length > 0 67 | - days_until_expiry | int <= renewal_threshold_days 68 | 69 | - name: Clean up temporary certificate file 70 | ansible.builtin.file: 71 | path: "{{ 
temp_cert_file.path }}" 72 | state: absent 73 | when: existing_secret.resources | length > 0 74 | 75 | - name: Display certificate status 76 | ansible.builtin.debug: 77 | msg: > 78 | Certificate status: 79 | {% if existing_secret.resources | length == 0 %} 80 | No existing certificate found. Will generate new certificate. 81 | {% elif need_new_cert %} 82 | Certificate expires in {{ days_until_expiry }} days (threshold: {{ renewal_threshold_days }} days). Will generate new certificate. 83 | {% else %} 84 | Certificate is valid for {{ days_until_expiry }} more days. Skipping certificate generation. 85 | {% endif %} 86 | 87 | - name: Create temporary directory for cert generation 88 | ansible.builtin.tempfile: 89 | state: directory 90 | prefix: kbs-cert- 91 | register: tmpdir 92 | when: need_new_cert 93 | 94 | tasks: 95 | - name: Generate private key 96 | community.crypto.openssl_privatekey: 97 | path: "{{ tmpdir.path }}/tls.key" 98 | size: 4096 99 | when: need_new_cert 100 | 101 | - name: Generate CSR 102 | community.crypto.openssl_csr: 103 | path: "{{ tmpdir.path }}/tls.csr" 104 | privatekey_path: "{{ tmpdir.path }}/tls.key" 105 | common_name: "kbs-trustee-operator-system" 106 | subject_alt_name: 107 | - "DNS:{{ common_name }}" 108 | when: need_new_cert 109 | 110 | - name: Generate self-signed certificate 111 | community.crypto.x509_certificate: 112 | path: "{{ tmpdir.path }}/tls.crt" 113 | privatekey_path: "{{ tmpdir.path }}/tls.key" 114 | csr_path: "{{ tmpdir.path }}/tls.csr" 115 | provider: selfsigned 116 | selfsigned_not_after: "+{{ days_valid }}d" 117 | when: need_new_cert 118 | 119 | - name: Create or update TLS secret for KBS 120 | kubernetes.core.k8s: 121 | kubeconfig: "{{ kubeconfig }}" 122 | state: present 123 | definition: 124 | apiVersion: v1 125 | kind: Secret 126 | metadata: 127 | name: "{{ secret_name }}" 128 | namespace: "imperative" 129 | type: kubernetes.io/tls 130 | stringData: 131 | tls.crt: "{{ lookup('file', tmpdir.path + '/tls.crt') }}" 132 | 
#!/bin/bash

# Render a preview of what Argo CD would deploy for one application of a
# validated pattern, via "helm template" (or "oc kustomize" for kustomize apps).
#
# Usage: preview.sh <site> <app-name> <git-repo> <git-branch>
#
# DISCLAIMER
#
# - Parsing of applications needs to be more clever.
# - There is currently not a mechanism to actually preview against multiple clusters
#   (i.e. a hub and a remote). All previews will be done against the current.
# - Make output can be included in the YAML.

SITE=$1; shift
APPNAME=$1; shift
GIT_REPO=$1; shift
GIT_BRANCH=$1; shift

if [ "${APPNAME}" != "clustergroup" ]; then
    # This covers the following case:
    # foobar:
    #   name: foo
    #   namespace: foo
    #   project: foo
    #   path: charts/all/foo
    # So we retrieve the actual index ("foobar") given the name attribute of the application
    APP=$(yq ".clusterGroup.applications | with_entries(select(.value.name == \"$APPNAME\")) | keys | .[0]" "values-$SITE.yaml")
    isLocalHelmChart=$(yq ".clusterGroup.applications.$APP.path" "values-$SITE.yaml")
    if [ "$isLocalHelmChart" != "null" ]; then
        # .path is set: the chart lives in the pattern's own repository
        # (reuse the value already queried instead of invoking yq again).
        chart=$isLocalHelmChart
    else
        helmrepo=$(yq ".clusterGroup.applications.$APP.repoURL" "values-$SITE.yaml")
        # NOTE(review): ":+" substitutes whenever helmrepo is non-empty, and
        # yq prints "null" for missing keys, so this always resolves to the
        # quay repo. Confirm whether ":-" (honor .repoURL when present) was
        # the real intent before changing it.
        helmrepo="${helmrepo:+oci://quay.io/hybridcloudpatterns}"
        chartversion=$(yq ".clusterGroup.applications.$APP.chartVersion" "values-$SITE.yaml")
        chartname=$(yq ".clusterGroup.applications.$APP.chart" "values-$SITE.yaml")
        chart="${helmrepo}/${chartname} --version ${chartversion}"
    fi
    namespace=$(yq ".clusterGroup.applications.$APP.namespace" "values-$SITE.yaml")
else
    APP=$APPNAME
    clusterGroupChartVersion=$(yq ".main.multiSourceConfig.clusterGroupChartVersion" values-global.yaml)
    helmrepo="oci://quay.io/hybridcloudpatterns"
    chart="${helmrepo}/clustergroup --version ${clusterGroupChartVersion}"
    namespace="openshift-operators"
fi
pattern=$(yq ".global.pattern" values-global.yaml)

# You can override the default lookups by using OCP_{PLATFORM,VERSION,DOMAIN}
# Note that when using the utility container you need to pass in the above variables
# by export EXTRA_ARGS="-e OCP_PLATFORM -e OCP_VERSION -e OCP_DOMAIN" before
# invoking pattern-util.sh
platform=${OCP_PLATFORM:-$(oc get Infrastructure.config.openshift.io/cluster -o jsonpath='{.spec.platformSpec.type}')}
ocpversion=${OCP_VERSION:-$(oc get clusterversion/version -o jsonpath='{.status.desired.version}' | awk -F. '{print $1"."$2}')}
domain=${OCP_DOMAIN:-$(oc get Ingress.config.openshift.io/cluster -o jsonpath='{.spec.domain}' | sed 's/^apps.//')}

# Strip helm-override syntax (spaces, "$", a leading "-", single quotes) from
# the argument and substitute the .Values.global.* template variables with the
# cluster values looked up above.
function replaceGlobals() {
    # $1 intentionally unquoted: word splitting collapses embedded
    # newlines/tabs to spaces, which the first sed expression then strips.
    output=$( echo $1 | sed -e 's/ //g' -e 's/\$//g' -e s@^-@@g -e s@\'@@g )

    output=$(echo "$output" | sed "s@{{.Values.global.clusterPlatform}}@${platform}@g")
    output=$(echo "$output" | sed "s@{{.Values.global.clusterVersion}}@${ocpversion}@g")
    output=$(echo "$output" | sed "s@{{.Values.global.clusterDomain}}@${domain}@g")

    echo "$output"
}

# Convert the application's overrides list into "--set name=value" arguments.
function getOverrides() {
    overrides=''
    overrides=$( yq ".clusterGroup.applications.$APP.overrides[]" "values-$SITE.yaml" )
    overrides=$( echo "$overrides" | tr -d '\n' )
    overrides=$( echo "$overrides" | sed -e 's/name:/ --set/g; s/value: /=/g' )
    if [ -n "$overrides" ]; then
        echo "$overrides"
    fi
}


CLUSTER_OPTS=""
CLUSTER_OPTS="$CLUSTER_OPTS --set global.pattern=$pattern"
CLUSTER_OPTS="$CLUSTER_OPTS --set global.repoURL=$GIT_REPO"
CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.repoURL=$GIT_REPO"
CLUSTER_OPTS="$CLUSTER_OPTS --set main.git.revision=$GIT_BRANCH"
CLUSTER_OPTS="$CLUSTER_OPTS --set global.namespace=$namespace"
CLUSTER_OPTS="$CLUSTER_OPTS --set global.hubClusterDomain=apps.$domain"
CLUSTER_OPTS="$CLUSTER_OPTS --set global.localClusterDomain=apps.$domain"
CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterDomain=$domain"
CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterVersion=$ocpversion"
CLUSTER_OPTS="$CLUSTER_OPTS --set global.clusterPlatform=$platform"


sharedValueFiles=$(yq ".clusterGroup.sharedValueFiles" "values-$SITE.yaml")
appValueFiles=$(yq ".clusterGroup.applications.$APP.extraValueFiles" "values-$SITE.yaml")
isKustomize=$(yq ".clusterGroup.applications.$APP.kustomize" "values-$SITE.yaml")
OVERRIDES=$( getOverrides )

VALUE_FILES="-f values-global.yaml -f values-$SITE.yaml"
IFS=$'\n'
for line in $sharedValueFiles; do
    # NOTE(review): the -f existence test runs on the raw yq line, before
    # replaceGlobals expands the {{ .Values.global.* }} placeholders -- verify
    # the check was not meant to run on the substituted path instead.
    if [ "$line" != "null" ] && [ -f "$line" ]; then
        file=$(replaceGlobals "$line")
        VALUE_FILES="$VALUE_FILES -f $PWD$file"
    fi
done

for line in $appValueFiles; do
    if [ "$line" != "null" ] && [ -f "$line" ]; then
        file=$(replaceGlobals "$line")
        VALUE_FILES="$VALUE_FILES -f $PWD$file"
    fi
done

if [ "$isKustomize" == "true" ]; then
    kustomizePath=$(yq ".clusterGroup.applications.$APP.path" "values-$SITE.yaml")
    repoURL=$(yq ".clusterGroup.applications.$APP.repoURL" "values-$SITE.yaml")
    # "git@*" matches any SSH remote prefix; the previous comparison against
    # the literal string "git@" could never match a real remote URL.
    if [[ $repoURL == http* ]] || [[ $repoURL == git@* ]]; then
        kustomizePath="${repoURL}/${kustomizePath}"
    fi
    cmd="oc kustomize ${kustomizePath}"
    eval "$cmd"
else
    cmd="helm template $chart --name-template ${APP} -n ${namespace} ${VALUE_FILES} ${OVERRIDES} ${CLUSTER_OPTS}"
    eval "$cmd"
fi
-------------------------------------------------------------------------------- 1 | # Changes 2 | 3 | ## Sep 24, 2024 4 | 5 | * Ansible has been moved out of the common code tree, you must use a clustergroup chart that is >= 0.9.1 6 | 7 | ## Sep 6, 2024 8 | 9 | * Most charts have been removed from the tree. To get the charts you now have to point to them 10 | 11 | ## Sep 25, 2023 12 | 13 | * Upgraded ESO to v0.9.5 14 | 15 | ## Aug 17, 2023 16 | 17 | * Introduced support for multisource applications via .chart + .chartVersion 18 | 19 | ## Jul 8, 2023 20 | 21 | * Introduced a default of 20 for sync failures retries in argo applications (global override via global.options.applicationRetryLimit 22 | and per-app override via .syncPolicy) 23 | 24 | ## May 22, 2023 25 | 26 | * Upgraded ESO to 0.8.2 27 | * *Important* we now use the newly blessed sso config for argo. This means that gitops < 1.8 are *unsupported* 28 | 29 | ## May 18, 2023 30 | 31 | * Introduce a EXTRA_HELM_OPTS env variable that will be passed to the helm invocations 32 | 33 | ## April 21, 2023 34 | 35 | * Added labels and annotation support to namespaces.yaml template 36 | 37 | ## Apr 11, 2023 38 | 39 | * Apply the ACM ocp-gitops-policy everywhere but the hub 40 | 41 | ## Apr 7, 2023 42 | 43 | * Moved to gitops-1.8 channel by default (stable is unmaintained and will be dropped starting with ocp-4.13) 44 | 45 | ## March 20, 2023 46 | 47 | * Upgraded ESO to 0.8.1 48 | 49 | ## February 9, 2023 50 | 51 | * Add support for /values-.yaml and for /values--.yaml 52 | 53 | ## January 29, 2023 54 | 55 | * Stop extracting the HUB's CA via an imperative job running on the imported cluster. 56 | Just use ACM to push the HUB's CA out to the managed clusters. 
57 | 58 | ## January 23, 2023 59 | 60 | * Add initial support for running ESO on ACM-imported clusters 61 | 62 | ## January 18, 2023 63 | 64 | * Add validate-schema target 65 | 66 | ## January 13, 2023 67 | 68 | * Simplify the secrets paths when using argo hosted sites 69 | 70 | ## January 10, 2023 71 | 72 | * vaultPrefixes is now optional in the v2 secret spec and defaults to ["hub"] 73 | 74 | ## December 9, 2022 75 | 76 | * Dropped insecureUnsealVaultInsideCluster (and file_unseal) entirely. Now 77 | vault is always unsealed via a cronjob in the cluster. It is recommended to 78 | store the imperative/vaultkeys secret offline securely and then delete it. 79 | 80 | ## December 8, 2022 81 | 82 | * Removed the legacy installation targets: 83 | `deploy upgrade legacy-deploy legacy-upgrade` 84 | Patterns must now use the operator-based installation 85 | 86 | ## November 29, 2022 87 | 88 | * Upgraded vault-helm to 0.23.0 89 | * Enable vault-ssl by default 90 | 91 | ## November 22, 2022 92 | 93 | * Implemented a new format for the values-secret.yaml. Example can be found in examples/ folder 94 | * Now the order of values-secret file lookup is the following: 95 | 1. ~/values-secret-.yaml 96 | 2. ~/values-secret.yaml 97 | 3. /values-secret.yaml.template 98 | * Add support for ansible vault encrypted values-secret files. You can now encrypt your values-secret file 99 | at rest with `ansible-vault encrypt ~/values-secret.yaml`. When running `make load-secrets` if an encrypted 100 | file is encountered the user will be prompted automatically for the password to decrypt it. 101 | 102 | ## November 6, 2022 103 | 104 | * Add support for /values--.yaml (e.g. 
/values-AWS-group-one.yaml) 105 | 106 | ## October 28, 2022 107 | 108 | * Updated vault helm chart to v0.22.1 and vault containers to 1.12.0 109 | 110 | ## October 25, 2022 111 | 112 | * Updated External Secrets Operator to v0.6.0 113 | * Moved to -UBI based ESO containers 114 | 115 | ## October 13, 2022 116 | 117 | * Added global.clusterVersion as a new helm variable which represents the OCP 118 | Major.Minor cluster version. By default now a user can add a 119 | values--.yaml file to have specific cluster version 120 | overrides (e.g. values-4.10-hub.yaml). Will need Validated Patterns Operator >= 0.0.6 121 | when deploying with the operator. Note: When using the ArgoCD Hub and spoke model, 122 | you cannot have spokes with a different version of OCP than the hub. 123 | 124 | ## October 4, 2022 125 | 126 | * Extended the values-secret.yaml file to support multiple vault paths and re-wrote 127 | the push_secrets feature as python module plugin. This requires the following line 128 | in a pattern's ansible.cfg's '[defaults]' stanza: 129 | 130 | `library=~/.ansible/plugins/modules:./ansible/plugins/modules:./common/ansible/plugins/modules:/usr/share/ansible/plugins/modules` 131 | 132 | ## October 3, 2022 133 | 134 | * Restore the ability to install a non-default site: `make TARGET_SITE=mysite install` 135 | * Revised tests (new output and filenames, requires adding new result files to Git) 136 | * ACM 2.6 required for ACM-based managed sites 137 | * Introduced global.clusterDomain template variable (without the `apps.` prefix) 138 | * Removed the ability to send specific charts to another cluster, use hosted argo sites instead 139 | * Added the ability to have the hub host `values-{site}.yaml` for spoke clusters. 140 | 141 | The following example would deploy the namespaces, subscriptions, and 142 | applications defined in `values-group-one.yaml` to the `perth` cluster 143 | directly from ArgoCD on the hub. 
144 | 145 | ```yaml 146 | managedClusterGroups: 147 | - name: group-one 148 | hostedArgoSites: 149 | - name: perth 150 | domain: perth1.beekhof.net 151 | bearerKeyPath: secret/data/hub/cluster_perth 152 | caKeyPath: secret/data/hub/cluster_perth_ca 153 | ``` 154 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/README.md: -------------------------------------------------------------------------------- 1 | # letsencrypt 2 | 3 | ## Forked from [Validated patterns lets encrypt chart.](https://github.com/validatedpatterns/letsencrypt-chart) 4 | 5 | ## Design for Azure 6 | 7 | Cert-manager needs the azure resource group for a zone in order to manage the DNS. 8 | Unfortunately this is a little tricky to get. 9 | 10 | To get this running on azure three compromises have been made: 11 | 12 | 1. The required information (managed_zone_name and managed_zone resource group) can be obtained via the ansible imperative framework. 13 | 14 | 2. The imperative framework is limited terms of feedback / logging. Please test carefully. 15 | 16 | 3. If the credentials can see more than one managed zone there may be issues. It presumes one. 17 | 18 | ![Version: 0.1.1](https://img.shields.io/badge/Version-0.1.1-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 1.16.0](https://img.shields.io/badge/AppVersion-1.16.0-informational?style=flat-square) 19 | 20 | A Helm chart to add letsencrypt support to Validated Patterns. 
21 | 22 | **Homepage:** 23 | 24 | ## Maintainers 25 | 26 | | Name | Email | URL | 27 | | ---- | ------ | --- | 28 | | Validated Patterns Team | | | 29 | 30 | ## Usage 31 | 32 | In order to enable this chart in your patterns, please add and edit the 33 | following lines to `values-AWS.yaml`: 34 | 35 | ```yaml 36 | letsencrypt: 37 | region: eu-central-1 # region of the cluster 38 | server: https://acme-v02.api.letsencrypt.org/directory 39 | # staging URL 40 | # server: https://acme-staging-v02.api.letsencrypt.org/directory 41 | email: foo@bar.it 42 | 43 | clusterGroup: 44 | applications: 45 | letsencrypt: 46 | name: letsencrypt 47 | namespace: letsencrypt 48 | project: default 49 | path: common/letsencrypt 50 | ``` 51 | 52 | ## Values 53 | 54 | | Key | Type | Default | Description | 55 | |-----|------|---------|-------------| 56 | | global | object | depends on the individual settings | Dictionary of the global settings to configure this chart | 57 | | letsencrypt | object | depends on the individual settings | This section contains all the parameters for the letsencrypt chart in order to request CA signed certificates in a Validated Pattern By default if you include this chart you enable the letsencrypt charts on both the *.apps. ingress and on the API endpoint | 58 | | letsencrypt.certmanagerChannel | string | `"stable-v1"` | String the channel to install cert-manager from (Defaults to "stable-v1") | 59 | | letsencrypt.duration | string | `"168h0m0s"` | Duration of the requested letsencrypt certificates (Defaults to 168h0m0s) | 60 | | letsencrypt.email | string | `"test@example.com"` | String containing the email used when requesting certificates to letsencrypt (required) These two lines need tweaking for every deployment. 
@example.com emails will be rejected by letsencrypt | 61 | | letsencrypt.nameservers | list | `["8.8.8.8:53","1.1.1.1:53"]` | List of DNS server (ip:port strings) to be used when doing DNS01 challenges (Defaults to [8.8.8.8:53, 1.1.1.1:53]) These two are needed because the DNS01 ACME solver needs outside DNS servers and won't really work with openshift's internal split-view DNS servers [see](https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check) | 62 | | letsencrypt.organizations | list | `["hybrid-cloud-patterns.io"]` | List of organization names to be put in a certificate (Defaults to [hybrid-cloud-patterns.io]) | 63 | | letsencrypt.region | string | `"eu-central-1"` | String that defines the region used by the route53/dns01 resolver in cert-manager (required) | 64 | | letsencrypt.renewBefore | string | `"28h0m0s"` | How long before expiration date should the certs be renewed (Defaults to 28h0m0s) | 65 | | letsencrypt.server | string | `"https://acme-staging-v02.api.letsencrypt.org/directory"` | String containing the letsencrypt ACME URL (Defaults to the staging server) By default we use the staging URL to avoid any ratelimiting while testing To switch to the production certificates signed by a recognized CA, please switch to the non-staging URL (see values.yaml) | 66 | | letsencrypt.usages | list | `["server auth"]` | List of certificate uses. See API cert-manager.io/v1.KeyUsage (Defaults to [server auth]) | 67 | 68 | ---------------------------------------------- 69 | Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) 70 | 71 | ## Notes 72 | 73 | Please be aware of the following gotchas when using this chart: 74 | 75 | 1. Once the API certificate has been replaced with the letsencrypt one, the `oc` commands might fail with x509 unknown certificate authority errors. 76 | You need to remove the previous CA from the kubeconfig file. 
Run: `oc config set-cluster --certificate-authority="/dev/null" --embed-certs` 77 | 2. When you switch to non-staging letsencrypt certificates, things might fail if you asked for too many certificates over the last few days. 78 | 3. The cluster takes ~20-30 mins to fully settle when both the API endpoint and the default ingress certificates are implemented 79 | 80 | ## Implementation 81 | 82 | This chart creates a Cloud Credential that is allowed to write and read DNS 83 | entries via Route53 in AWS. That credential is then used by cert-manager to 84 | prove ownership of the DNS zone and answer the ACME DNS01 challenges. 85 | 86 | We ask for a single wildcard certificate for the default Ingress `*.apps.domain` 87 | and one non-wildcard certificate for the API endpoint `api.domain`. 88 | 89 | We use Argo's Server-Side Apply feature to patch in the Ingress Controller and 90 | the API endpoint certificates. 91 | 92 | Currently we also patch the main cluster-wide Argo instance to set the tls 93 | route to `reencrypt` in order to have a proper cert there. Once issue 297 in the 94 | gitops-operator repository is fixed, we can drop that. 95 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # coco-pattern 2 | 3 | This is a validated pattern for deploying confidential containers on OpenShift. 4 | 5 | The target operating model has two clusters: 6 | 7 | - One in a "trusted" zone where the remote attestation, KMS and Key Broker infrastructure are deployed. 8 | - A second where a subset of workloads are deployed in confidential containers. 9 | 10 | The current version of this application assumes that the confidential containers are deployed to Azure. 11 | 12 | On the platform a sample workload is deployed: 13 | 14 | 1. Sample hello world applications to allow users to experiment with the policies for CoCo and the KBS (trustee). 15 | 2. 
A sample application `kbs-access` which presents secrets obtained from trustee to a web service. This is designed to allow users to test locked down environments. 16 | 17 | Future work includes: 18 | 19 | 1. Supporting a multiple cluster deployment 20 | 2. Supporting multiple infrastructure providers 21 | 3. Supporting a more sophisticated workload such as confidential AI inference with protected GPUs. 22 | 23 | ## Current constraints and assumptions 24 | 25 | - Currently only known to work with `azure` as the provider of confidential vms via peer-pods. 26 | - Only known to work today with everything on one cluster. The work to expand this is in flight. 27 | - Below version 3.1, if not using ARO you must either provide your own CA signed certs, or use let's encrypt. 28 | - Must be on 4.16.14 or later. 29 | 30 | ## Major versions 31 | 32 | ### `3.*` 33 | 34 | Version `3.*` of the pattern is currently constrained to support the general availability releases of coco. 35 | 36 | - (OpenShift Sandboxed Containers Operator) `1.10.*` and above 37 | - Trustee `0.4.*` 38 | 39 | This limits support to OpenShift 4.16 and higher. 40 | 41 | The pattern has been tested on Azure for two installation methods: 42 | 43 | 1. Installing onto an ARO cluster 44 | 2. Self managed OpenShift install using the `openshift-install` CLI. 45 | 46 | #### Known limitations 47 | 48 | [Additional configuration](https://issues.redhat.com/browse/KATA-4107) is required to pull secrets from authenticated registries. 49 | 50 | ### `2.*` 51 | 52 | Version `2.*` of the pattern is currently constrained to support: 53 | 54 | - (OpenShift Sandboxed Containers Operator) `1.9.*` 55 | - Trustee `0.3.*` 56 | 57 | This limits support to OpenShift 4.16 and higher. 58 | 59 | The pattern has been tested on Azure for two installation methods: 60 | 61 | 1. Installing onto an ARO cluster 62 | 2. Self managed OpenShift install using the `openshift-install` CLI. 
63 | 64 | > [!IMPORTANT] 65 | > You need an external CA signed certificate to be added (e.g. with let's encrypt) to a self-managed install 66 | 67 | ### `1.0.0` 68 | 69 | 1.0.0 supports OpenShift Sandboxed containers version `1.8.1` along with Trustee version `0.2.0`. 70 | 71 | The pattern has been tested on Azure for one installation method: 72 | 73 | 1. Self managed OpenShift install using the `openshift-install` CLI 74 | 2. Installing on top of an existing Azure Red Hat OpenShift (ARO) cluster 75 | 76 | ## Validated pattern flavours 77 | 78 | **Today the demo has one flavour**. 79 | A number are planned based on various different hub cluster-groups. 80 | You can switch between behaviours by configuring the [`global.main.clusterGroupName`](https://validatedpatterns.io/learn/values-files/) key in the `values-global.yaml` file. 81 | 82 | `values-simple.yaml`: or the `simple` cluster group is the default for the pattern. 83 | It deploys a hello-openshift application 3 times: 84 | 85 | - A standard pod 86 | - A kata container with peer-pods 87 | - A confidential kata-container 88 | 89 | ## Setup instructions 90 | 91 | ### Default single cluster setup with `values-simple.yaml` 92 | 93 | The instructions here presume you have a cluster. See further down for provisioning instructions for a cluster. 94 | 95 | #### Fork and Clone the GitHub repository 96 | 97 | 1. Following [standard validated patterns workflow](https://validatedpatterns.io/learn/workflow/) fork the repository and clone to your development environment which has `podman` and `git` 98 | 2. If using a particular version (e.g. `1.0.0`) checkout the correct tag. 99 | 100 | > [!TIP] 101 | > Forking is essential as the validated pattern uses ArgoCD to reconcile its state against your remote (forked) repository. 102 | 103 | #### Configuring required secrets / parameters 104 | 105 | The secrets here secure Trustee and the peer-pod vms. Mostly they are for demonstration purposes. 
106 | This only has to be done once. 107 | 108 | 1. Run `sh scripts/gen-secrets.sh` 109 | 110 | > [!NOTE] 111 | > Once generated, this script will not overwrite secrets. Be careful when doing multiple tests. 112 | 113 | #### Configuring let's encrypt 114 | 115 | > [!IMPORTANT] 116 | > Ensure you have password login available to the cluster. Let's encrypt will replace the API certificate in addition to the certificates to use with routes. 117 | 118 | Trustee requires a trusted CA issued certificate. Let's Encrypt is included for environments without a trusted cert on OpenShift's routes. 119 | 120 | If you need a Let's Encrypt certificate to be issued, the `letsencrypt` application configuration needs to be changed as below. 121 | 122 | ```yaml 123 | --- 124 | # Default configuration, safe for ARO 125 | letsencrypt: 126 | name: letsencrypt 127 | namespace: letsencrypt 128 | project: hub 129 | path: charts/all/letsencrypt 130 | # Default to 'safe' for ARO 131 | overrides: 132 | - name: letsencrypt.enabled 133 | value: false 134 | --- 135 | # Explicitly correct configuration for enabling let's encrypt 136 | letsencrypt: 137 | name: letsencrypt 138 | namespace: letsencrypt 139 | project: hub 140 | path: charts/all/letsencrypt 141 | overrides: 142 | - name: letsencrypt.enabled 143 | value: true 144 | ``` 145 | 146 | > [!WARNING] 147 | > Configuration changes are only effective once committed and pushed to your remote repository. 148 | 149 | #### Installing onto a cluster 150 | 151 | Once your configuration is pushed (if required), run `./pattern.sh make install` to provision a cluster. 152 | 153 | > [!TIP] 154 | > The branch and default origin you have checked-out in your local repository is used to determine what ArgoCD and the patterns operator should reconcile against. A typical choice is to use the main branch of your fork. 
155 | 156 | ## Cluster setup (if not already set up) 157 | 158 | ### Single cluster install on an OCP cluster on azure using Red Hat Demo Platform 159 | 160 | Red Hat provides a demo platform. This allows easy access for Red Hat associates and partners to ephemeral cloud resources. The pattern is known to work with this setup. 161 | 162 | 1. Get the [openshift installer](https://console.redhat.com/openshift/downloads) 163 | 1. **NOTE: openshift installer must be updated regularly if you want to automatically provision the latest versions of OCP** 164 | 2. Get access to an [Azure Subscription Based Blank Open Environment](https://catalog.demo.redhat.com/catalog?category=Open_Environments&search=azure&item=babylon-catalog-prod%2Fazure-gpte.open-environment-azure-subscription.prod). 165 | 3. Import the required azure environmental variables (see code block below) 166 | 4. Ensure certificates are configured (via let's encrypt or do so manually) 167 | 5. Run the wrapper install script 168 | 1. `bash ./rhdp/wrapper.sh azure-region-code` 169 | 2. Where azure region code is `eastasia`, `useast2` etc. 170 | 6. You *should* be done 171 | 1. You *may* need to recreate the hello world peer-pods depending on timeouts. 172 | 173 | ```shell 174 | export GUID= 175 | export CLIENT_ID= 176 | export PASSWORD= 177 | export TENANT= 178 | export SUBSCRIPTION= 179 | export RESOURCEGROUP= 180 | ``` 181 | 182 | ### Single cluster install on plain old azure *not* using Red Hat Demo Platform 183 | 184 | > [!TIP] 185 | > Don't use the default node sizes; increase the node sizes such as below 186 | 187 | 1. Login to console.redhat.com 188 | 2. Get the openshift installer 189 | 3. Login to azure locally. 190 | 4. `openshift-install create install-config` 191 | 1. Select azure 192 | 2. For Red Hatters and partners using RHDP make sure you select the same region for your account that you selected in RHDP 193 | 5. Change worker machine type e.g. 
change `type: Standard_D4s_v5` to `type: Standard_D8s_v5` or similar based on your needs. 194 | 6. `mkdir ./ocp-install && mv openshift-install.yaml ./ocp-install` 195 | 7. `openshift-install create cluster --dir=./ocp-install` 196 | 8. Once installed: 197 | 1. Login to `oc` 198 | 2. Configure Let's Encrypt (if required) 199 | 3. `./pattern.sh make install` 200 | 201 | ### Multi cluster setup 202 | 203 | TBD 204 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /common/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. 
(Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /charts/all/letsencrypt/LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. 
For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /common/Makefile: -------------------------------------------------------------------------------- 1 | NAME ?= $(shell basename "`pwd`") 2 | 3 | ifneq ($(origin TARGET_SITE), undefined) 4 | TARGET_SITE_OPT=--set main.clusterGroupName=$(TARGET_SITE) 5 | endif 6 | 7 | # Set this to true if you want to skip any origin validation 8 | DISABLE_VALIDATE_ORIGIN ?= false 9 | ifeq ($(DISABLE_VALIDATE_ORIGIN),true) 10 | VALIDATE_ORIGIN := 11 | else 12 | VALIDATE_ORIGIN := validate-origin 13 | endif 14 | 15 | # This variable can be set in order to pass additional helm arguments from the 16 | # the command line. I.e. we can set things without having to tweak values files 17 | EXTRA_HELM_OPTS ?= 18 | 19 | # This variable can be set in order to pass additional ansible-playbook arguments from the 20 | # the command line. I.e. 
we can set -vvv for more verbose logging 21 | EXTRA_PLAYBOOK_OPTS ?= 22 | 23 | # INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248 24 | # or 25 | # INDEX_IMAGES=registry-proxy.engineering.redhat.com/rh-osbs/iib:394248,registry-proxy.engineering.redhat.com/rh-osbs/iib:394249 26 | INDEX_IMAGES ?= 27 | 28 | # git branch --show-current is also available as of git 2.22, but we will use this for compatibility 29 | TARGET_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) 30 | 31 | #default to the branch remote 32 | TARGET_ORIGIN ?= $(shell git config branch.$(TARGET_BRANCH).remote) 33 | 34 | # This is to ensure that whether we start with a git@ or https:// URL, we end up with an https:// URL 35 | # This is because we expect to use tokens for repo authentication as opposed to SSH keys 36 | TARGET_REPO=$(shell git ls-remote --get-url --symref $(TARGET_ORIGIN) | sed -e 's/.*URL:[[:space:]]*//' -e 's%^git@%%' -e 's%^https://%%' -e 's%:%/%' -e 's%^%https://%') 37 | 38 | UUID_FILE ?= ~/.config/validated-patterns/pattern-uuid 39 | UUID_HELM_OPTS ?= 40 | 41 | # --set values always take precedence over the contents of -f 42 | ifneq ("$(wildcard $(UUID_FILE))","") 43 | UUID := $(shell cat $(UUID_FILE)) 44 | UUID_HELM_OPTS := --set main.analyticsUUID=$(UUID) 45 | endif 46 | 47 | # Set the secret name *and* its namespace when deploying from private repositories 48 | # The format of said secret is documented here: https://argo-cd.readthedocs.io/en/stable/operator-manual/declarative-setup/#repositories 49 | TOKEN_SECRET ?= 50 | TOKEN_NAMESPACE ?= 51 | 52 | ifeq ($(TOKEN_SECRET),) 53 | HELM_OPTS=-f values-global.yaml --set main.git.repoURL="$(TARGET_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) 54 | else 55 | # When we are working with a private repository we do not escape the git URL as it might be using an ssh secret which does not use https:// 56 | TARGET_CLEAN_REPO=$(shell git ls-remote --get-url --symref 
$(TARGET_ORIGIN)) 57 | HELM_OPTS=-f values-global.yaml --set main.tokenSecret=$(TOKEN_SECRET) --set main.tokenSecretNamespace=$(TOKEN_NAMESPACE) --set main.git.repoURL="$(TARGET_CLEAN_REPO)" --set main.git.revision=$(TARGET_BRANCH) $(TARGET_SITE_OPT) $(UUID_HELM_OPTS) $(EXTRA_HELM_OPTS) 58 | endif 59 | 60 | # Helm does the right thing and fetches all the tags and detects the newest one 61 | PATTERN_INSTALL_CHART ?= oci://quay.io/hybridcloudpatterns/pattern-install 62 | 63 | ##@ Pattern Common Tasks 64 | 65 | .PHONY: help 66 | help: ## This help message 67 | @echo "Pattern: $(NAME)" 68 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^(\s|[a-zA-Z_0-9-])+:.*?##/ { printf " \033[36m%-35s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 69 | 70 | # Makefiles in the individual patterns should call these targets explicitly 71 | # e.g. from industrial-edge: make -f common/Makefile show 72 | .PHONY: show 73 | show: ## show the starting template without installing it 74 | helm template $(PATTERN_INSTALL_CHART) --name-template $(NAME) $(HELM_OPTS) 75 | 76 | preview-all: ## (EXPERIMENTAL) Previews all applications on hub and managed clusters 77 | @echo "NOTE: This is just a tentative approximation of rendering all hub and managed clusters templates" 78 | @common/scripts/preview-all.sh $(TARGET_REPO) $(TARGET_BRANCH) 79 | 80 | preview-%: 81 | $(eval CLUSTERGROUP ?= $(shell yq ".main.clusterGroupName" values-global.yaml)) 82 | @common/scripts/preview.sh $(CLUSTERGROUP) $* $(TARGET_REPO) $(TARGET_BRANCH) 83 | 84 | .PHONY: operator-deploy 85 | operator-deploy operator-upgrade: validate-prereq $(VALIDATE_ORIGIN) validate-cluster ## runs helm install 86 | @common/scripts/deploy-pattern.sh $(NAME) $(PATTERN_INSTALL_CHART) $(HELM_OPTS) 87 | 88 | .PHONY: uninstall 89 | uninstall: ## runs helm uninstall 90 | $(eval CSV := $(shell oc get subscriptions -n openshift-operators openshift-gitops-operator 
-ojsonpath={.status.currentCSV})) 91 | helm uninstall $(NAME) 92 | @oc delete csv -n openshift-operators $(CSV) 93 | 94 | .PHONY: load-secrets 95 | load-secrets: ## loads the secrets into the backend determined by values-global setting 96 | common/scripts/process-secrets.sh $(NAME) 97 | 98 | .PHONY: legacy-load-secrets 99 | legacy-load-secrets: ## loads the secrets into vault (only) 100 | common/scripts/vault-utils.sh push_secrets $(NAME) 101 | 102 | .PHONY: secrets-backend-vault 103 | secrets-backend-vault: ## Edits values files to use default Vault+ESO secrets config 104 | common/scripts/set-secret-backend.sh vault 105 | common/scripts/manage-secret-app.sh vault present 106 | common/scripts/manage-secret-app.sh golang-external-secrets present 107 | common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent 108 | @git diff --exit-code || echo "Secrets backend set to vault, please review changes, commit, and push to activate in the pattern" 109 | 110 | .PHONY: secrets-backend-kubernetes 111 | secrets-backend-kubernetes: ## Edits values file to use Kubernetes+ESO secrets config 112 | common/scripts/set-secret-backend.sh kubernetes 113 | common/scripts/manage-secret-namespace.sh validated-patterns-secrets present 114 | common/scripts/manage-secret-app.sh vault absent 115 | common/scripts/manage-secret-app.sh golang-external-secrets present 116 | @git diff --exit-code || echo "Secrets backend set to kubernetes, please review changes, commit, and push to activate in the pattern" 117 | 118 | .PHONY: secrets-backend-none 119 | secrets-backend-none: ## Edits values files to remove secrets manager + ESO 120 | common/scripts/set-secret-backend.sh none 121 | common/scripts/manage-secret-app.sh vault absent 122 | common/scripts/manage-secret-app.sh golang-external-secrets absent 123 | common/scripts/manage-secret-namespace.sh validated-patterns-secrets absent 124 | @git diff --exit-code || echo "Secrets backend set to none, please review changes, commit, and 
push to activate in the pattern" 125 | 126 | .PHONY: load-iib 127 | load-iib: ## CI target to install Index Image Bundles 128 | @set -e; if [ x$(INDEX_IMAGES) != x ]; then \ 129 | ansible-playbook $(EXTRA_PLAYBOOK_OPTS) rhvp.cluster_utils.iib_ci; \ 130 | else \ 131 | echo "No INDEX_IMAGES defined. Bailing out"; \ 132 | exit 1; \ 133 | fi 134 | 135 | .PHONY: token-kubeconfig 136 | token-kubeconfig: ## Create a local ~/.kube/config with password (not usually needed) 137 | common/scripts/write-token-kubeconfig.sh 138 | 139 | ##@ Validation Tasks 140 | 141 | # We only check the remote ssh git branch's existence if we're not running inside a container 142 | # as getting ssh auth working inside a container seems a bit brittle 143 | # If the main repoUpstreamURL field is set, then we need to check against 144 | # that and not target_repo 145 | .PHONY: validate-origin 146 | validate-origin: ## verify the git origin is available 147 | @echo "Checking repository:" 148 | $(eval UPSTREAMURL := $(shell yq -r '.main.git.repoUpstreamURL // (.main.git.repoUpstreamURL = "")' values-global.yaml)) 149 | @if [ -z "$(UPSTREAMURL)" ]; then\ 150 | echo -n " $(TARGET_REPO) - branch '$(TARGET_BRANCH)': ";\ 151 | git ls-remote --exit-code --heads $(TARGET_REPO) $(TARGET_BRANCH) >/dev/null &&\ 152 | echo "OK" || (echo "NOT FOUND"; exit 1);\ 153 | else\ 154 | echo "Upstream URL set to: $(UPSTREAMURL)";\ 155 | echo -n " $(UPSTREAMURL) - branch '$(TARGET_BRANCH)': ";\ 156 | git ls-remote --exit-code --heads $(UPSTREAMURL) $(TARGET_BRANCH) >/dev/null &&\ 157 | echo "OK" || (echo "NOT FOUND"; exit 1);\ 158 | fi 159 | 160 | .PHONY: validate-cluster 161 | validate-cluster: ## Do some cluster validations before installing 162 | @echo "Checking cluster:" 163 | @echo -n " cluster-info: " 164 | @oc cluster-info >/dev/null && echo "OK" || (echo "Error"; exit 1) 165 | @echo -n " storageclass: " 166 | @if [ `oc get storageclass -o go-template='{{printf "%d\n" (len .items)}}'` -eq 0 ]; then\ 167 | echo
"WARNING: No storageclass found";\ 168 | else\ 169 | echo "OK";\ 170 | fi 171 | 172 | 173 | .PHONY: validate-schema 174 | validate-schema: ## validates values files against schema in common/clustergroup 175 | $(eval VAL_PARAMS := $(shell for i in ./values-*.yaml; do echo -n "$${i} "; done)) 176 | @echo -n "Validating clustergroup schema of: " 177 | @set -e; for i in $(VAL_PARAMS); do echo -n " $$i"; helm template oci://quay.io/hybridcloudpatterns/clustergroup $(HELM_OPTS) -f "$${i}" >/dev/null; done 178 | @echo 179 | 180 | .PHONY: validate-prereq 181 | validate-prereq: ## verify pre-requisites 182 | $(eval GLOBAL_PATTERN := $(shell yq -r .global.pattern values-global.yaml)) 183 | @if [ $(NAME) != $(GLOBAL_PATTERN) ]; then\ 184 | echo "";\ 185 | echo "WARNING: folder directory is \"$(NAME)\" and global.pattern is set to \"$(GLOBAL_PATTERN)\"";\ 186 | echo "this can create problems. Please make sure they are the same!";\ 187 | echo "";\ 188 | fi 189 | @if [ ! -f /run/.containerenv ]; then\ 190 | echo "Checking prerequisites:";\ 191 | echo -n " Check for python-kubernetes: ";\ 192 | if ! ansible -m ansible.builtin.command -a "{{ ansible_python_interpreter }} -c 'import kubernetes'" localhost > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ 193 | echo "OK";\ 194 | echo -n " Check for kubernetes.core collection: ";\ 195 | if ! 
ansible-galaxy collection list | grep kubernetes.core > /dev/null 2>&1; then echo "Not found"; exit 1; fi;\ 196 | echo "OK";\ 197 | else\ 198 | if [ -f values-global.yaml ]; then\ 199 | OUT=`yq -r '.main.multiSourceConfig.enabled // (.main.multiSourceConfig.enabled = "false")' values-global.yaml`;\ 200 | if [ "$${OUT,,}" = "false" ]; then\ 201 | echo "You must set \".main.multiSourceConfig.enabled: true\" in your 'values-global.yaml' file";\ 202 | echo "because your common subfolder is the slimmed down version with no helm charts in it";\ 203 | exit 1;\ 204 | fi;\ 205 | fi;\ 206 | fi 207 | 208 | .PHONY: argo-healthcheck 209 | argo-healthcheck: ## Checks if all argo applications are synced 210 | @echo "Checking argo applications" 211 | $(eval APPS := $(shell oc get applications.argoproj.io -A -o jsonpath='{range .items[*]}{@.metadata.namespace}{","}{@.metadata.name}{"\n"}{end}')) 212 | @NOTOK=0; \ 213 | for i in $(APPS); do\ 214 | n=`echo "$${i}" | cut -f1 -d,`;\ 215 | a=`echo "$${i}" | cut -f2 -d,`;\ 216 | STATUS=`oc get -n "$${n}" applications.argoproj.io/"$${a}" -o jsonpath='{.status.sync.status}'`;\ 217 | if [[ $$STATUS != "Synced" ]]; then\ 218 | NOTOK=$$(( $${NOTOK} + 1));\ 219 | fi;\ 220 | HEALTH=`oc get -n "$${n}" applications.argoproj.io/"$${a}" -o jsonpath='{.status.health.status}'`;\ 221 | if [[ $$HEALTH != "Healthy" ]]; then\ 222 | NOTOK=$$(( $${NOTOK} + 1));\ 223 | fi;\ 224 | echo "$${n} $${a} -> Sync: $${STATUS} - Health: $${HEALTH}";\ 225 | done;\ 226 | if [ $${NOTOK} -gt 0 ]; then\ 227 | echo "Some applications are not synced or are unhealthy";\ 228 | exit 1;\ 229 | fi 230 | 231 | 232 | ##@ Test and Linters Tasks 233 | 234 | .PHONY: qe-tests 235 | qe-tests: ## Runs the tests that QE runs 236 | @set -e; if [ -f ./tests/interop/run_tests.sh ]; then \ 237 | pushd ./tests/interop; ./run_tests.sh; popd; \ 238 | else \ 239 | echo "No ./tests/interop/run_tests.sh found skipping"; \ 240 | fi 241 | 242 | .PHONY: super-linter 243 | super-linter: ## Runs super 
linter locally 244 | rm -rf .mypy_cache 245 | podman run -e RUN_LOCAL=true -e USE_FIND_ALGORITHM=true \ 246 | -e VALIDATE_ANSIBLE=false \ 247 | -e VALIDATE_BASH=false \ 248 | -e VALIDATE_CHECKOV=false \ 249 | -e VALIDATE_DOCKERFILE_HADOLINT=false \ 250 | -e VALIDATE_JSCPD=false \ 251 | -e VALIDATE_JSON_PRETTIER=false \ 252 | -e VALIDATE_MARKDOWN_PRETTIER=false \ 253 | -e VALIDATE_KUBERNETES_KUBECONFORM=false \ 254 | -e VALIDATE_PYTHON_PYLINT=false \ 255 | -e VALIDATE_SHELL_SHFMT=false \ 256 | -e VALIDATE_TEKTON=false \ 257 | -e VALIDATE_YAML=false \ 258 | -e VALIDATE_YAML_PRETTIER=false \ 259 | $(DISABLE_LINTERS) \ 260 | -v $(PWD):/tmp/lint:rw,z \ 261 | -w /tmp/lint \ 262 | ghcr.io/super-linter/super-linter:slim-v7 263 | 264 | .PHONY: deploy upgrade legacy-deploy legacy-upgrade 265 | deploy upgrade legacy-deploy legacy-upgrade: 266 | @echo "UNSUPPORTED TARGET: please switch to 'operator-deploy'"; exit 1 267 | --------------------------------------------------------------------------------