├── .github └── workflows │ ├── build-push-images.yaml │ ├── individual-image-scanner-quay.yaml │ ├── periodic-scanner-quay.yaml │ └── update-osp-nightly.yaml ├── .gitignore ├── .gitleaks.toml ├── .pre-commit-config.yaml ├── .shellcheckrc ├── .tekton ├── cleanup-existing-clusters.yaml ├── open-infra-deployment-pr-osp-nightly.save ├── open-infra-deployment-pr.save ├── pipeline-service-upgrade-test-ocp-414.yaml ├── pipeline │ ├── test-pipeline-service-deployment.yaml │ ├── test-pipeline-service-upgrade.yaml │ ├── test-rhtap-e2e.yaml │ └── update-repository.yaml ├── tasks │ ├── deploy-cluster.yaml │ ├── deploy-pipeline-service.yaml │ ├── destroy-cluster.yaml │ ├── destroy-existing-cluster.yaml │ ├── generate-cluster-name.yaml │ ├── run-rhtap-e2e.yaml │ ├── setup-ci-runner-container.yaml │ ├── test-pipeline-service.yaml │ └── update-repository.yaml ├── test-docker-images-build.yaml ├── test-pipeline-service-deployment-ocp-414.yaml ├── test-pipeline-service-static-code-analysis.yaml ├── test-rhtap-e2e.yaml └── update-dependencies.yaml ├── CONTRIBUTING.md ├── DEPENDENCIES.md ├── LICENSE ├── OWNERS ├── README.md ├── ci ├── README.md ├── docs │ ├── continuous_integation.md │ └── static_checks.md ├── hack │ ├── plnsvc_upgrade_tests.sh │ ├── rosa_cluster_provision.sh │ └── rosa_hcp_setup.sh └── images │ ├── ci-runner │ ├── Dockerfile │ └── hack │ │ ├── bin │ │ ├── cleanup-expired-clusters.sh │ │ ├── copy-plnsvc-code.sh │ │ ├── create-ci-runner-container.sh │ │ ├── deploy-cluster.sh │ │ ├── destroy-cluster.sh │ │ ├── run-plnsvc-setup.sh │ │ ├── run-plnsvc-test.sh │ │ └── utils.sh │ │ ├── manifests │ │ └── sidecar │ │ │ ├── kustomization.yaml │ │ │ └── pod.yaml │ │ └── sidecar │ │ └── bin │ │ ├── plnsvc_setup.sh │ │ └── plnsvc_test.sh │ ├── e2e-test-runner │ ├── Dockerfile │ └── run-e2e-test.sh │ ├── quay-upload │ ├── Dockerfile │ └── image-upload.sh │ ├── static-checks │ ├── Dockerfile │ └── content │ │ ├── bin │ │ ├── all.sh │ │ ├── checkov.sh │ │ ├── grafana-dashboards.sh │ 
│ ├── hadolint.sh │ │ ├── shellcheck.sh │ │ └── yamllint.sh │ │ └── config │ │ ├── checkov.yaml │ │ ├── hadolint.yaml │ │ ├── shellcheck.rc │ │ └── yamllint.yaml │ └── vulnerability-scan │ ├── Dockerfile │ ├── common │ └── utils.sh │ ├── scan-image.sh │ └── scan.sh ├── developer ├── config.yaml ├── docs │ ├── README.md │ └── troubleshooting.md ├── hack │ ├── build-images-buildah.sh │ ├── build_image.sh │ ├── ci.sh │ └── run_image.sh ├── images │ └── dependencies-update │ │ ├── Dockerfile │ │ └── hack │ │ └── bin │ │ ├── is-pr-skipped.sh │ │ ├── setup-local-repository.sh │ │ ├── task.sh │ │ ├── tasks │ │ ├── update_binaries.sh │ │ └── update_dockerfiles_base_images_sha.sh │ │ └── update.sh └── openshift │ ├── README.md │ ├── apps │ ├── openshift-gitops.sh │ └── pipeline-service.sh │ ├── dev_setup.sh │ ├── gitops │ ├── argocd │ │ ├── kustomization.yaml │ │ ├── pipeline-service-o11y.yaml │ │ ├── pipeline-service-o11y │ │ │ ├── README.md │ │ │ ├── appstudio-grafana │ │ │ │ ├── allow-argocd-to-manage.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── namespace.yaml │ │ │ ├── appstudio-prometheus │ │ │ │ ├── allow-argocd-to-manage.yaml │ │ │ │ └── kustomization.yaml │ │ │ └── kustomization.yaml │ │ ├── pipeline-service-storage.yaml │ │ ├── pipeline-service-storage │ │ │ ├── kustomization.yaml │ │ │ ├── minio │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── operator │ │ │ │ │ ├── allow-argocd-to-manage.yaml │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ └── minio.yaml │ │ │ │ └── tenant │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ └── tenant.yaml │ │ │ └── postgres.yaml │ │ ├── pipeline-service.yaml │ │ └── pipeline-service │ │ │ ├── kustomization.yaml │ │ │ └── tekton-results │ │ │ ├── kustomization.yaml │ │ │ ├── minio-create-bucket.yaml │ │ │ └── minio-tls.yaml │ └── local │ │ ├── kustomization.yaml │ │ ├── patch-pipeline-service-o11y.yaml │ │ ├── patch-pipeline-service-storage.yaml │ │ ├── patch-pipeline-service.yaml │ │ └── tekton-results │ │ ├── kustomization.yaml │ │ ├── 
namespace.yaml │ │ ├── rds-db-cert-configmap.yaml │ │ ├── tekton-results-db-secret.yaml │ │ ├── tekton-results-minio-config.yaml │ │ ├── tekton-results-postgresql-tls-secret.yaml │ │ └── tekton-results-s3-secret.yaml │ ├── operators │ └── openshift-gitops │ │ ├── kustomization.yaml │ │ └── openshift-gitops.yaml │ ├── reset.sh │ └── utils.sh ├── operator ├── docs │ └── operations.md ├── gitops │ ├── README.md │ ├── argocd │ │ ├── grafana │ │ │ ├── dashboard.yaml │ │ │ ├── dashboards │ │ │ │ └── pipeline-service-dashboard.json │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── pipeline-service.yaml │ │ └── pipeline-service │ │ │ ├── kustomization.yaml │ │ │ ├── metrics-exporter │ │ │ ├── clusterrole.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── deployment.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── service.yaml │ │ │ ├── serviceaccount.yaml │ │ │ └── servicemonitor.yaml │ │ │ ├── openshift-pipelines │ │ │ ├── allow-argocd-to-manage-jobs.yaml │ │ │ ├── allow-argocd-to-manage.yaml │ │ │ ├── appstudio-pipelines-scc.yaml │ │ │ ├── bugfix-pac-gitauth-secrets.yaml │ │ │ ├── chains-observability-service.yaml │ │ │ ├── chains-public-key-viewer.yaml │ │ │ ├── chains-secrets-config.yaml │ │ │ ├── chains-service-monitor.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── namespace.yaml │ │ │ ├── openshift-operator.yaml │ │ │ ├── osp-nightly-catalog-source.yaml │ │ │ └── tekton-config.yaml │ │ │ └── tekton-results │ │ │ ├── README.md │ │ │ ├── api-db-config.yaml │ │ │ ├── api-kube-rbac-proxy.yaml │ │ │ ├── api-route.yaml │ │ │ ├── api-s3-config.yaml │ │ │ ├── api-service-patch.yaml │ │ │ ├── api-service-sync.yaml │ │ │ ├── api-service-tls.yaml │ │ │ ├── api-sync.yaml │ │ │ ├── config.env │ │ │ ├── kustomization.yaml │ │ │ ├── namespace.yaml │ │ │ ├── service-monitor.yaml │ │ │ ├── watcher-config.yaml │ │ │ ├── watcher-kube-rbac-proxy.yaml │ │ │ ├── watcher-logging-rbac.yaml │ │ │ ├── watcher-logging.yaml │ │ │ ├── watcher-rbac.yaml │ │ │ ├── watcher-service-patch.yaml │ 
│ │ ├── watcher-service-sync.yaml │ │ │ └── watcher-sync.yaml │ ├── compute │ │ └── pipeline-service-manager │ │ │ ├── kustomization.yaml │ │ │ ├── namespace.yaml │ │ │ ├── pipeline-service-manager.yaml │ │ │ ├── role.yaml │ │ │ └── rolebinding.yaml │ └── sre │ │ ├── credentials │ │ ├── kubeconfig │ │ │ ├── .gitignore │ │ │ └── README.md │ │ ├── manifests │ │ │ └── README.md │ │ └── secrets │ │ │ └── README.md │ │ └── environment │ │ └── compute │ │ └── .gitignore └── test │ ├── manifests │ ├── setup │ │ └── pipeline-service │ │ │ ├── appstudio-pipeline-service-account.yaml │ │ │ ├── kustomization.yaml │ │ │ └── namespace.yaml │ └── test │ │ ├── metrics │ │ └── curl-metrics-service-pipeline.yaml │ │ └── tekton-chains │ │ ├── chains-test-service-account.yaml │ │ ├── kustomization.yaml │ │ ├── public-key.yaml │ │ ├── simple-copy-pipeline.yaml │ │ └── tekton-chains-metrics.yaml │ └── test.sh └── shared ├── config └── dependencies.sh └── hack └── install.sh /.github/workflows/periodic-scanner-quay.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: (cron job) Fetch security scan results from quay 3 | on: 4 | schedule: 5 | - cron: "0 0 * * *" 6 | workflow_dispatch: 7 | permissions: read-all 8 | jobs: 9 | build-push: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | contents: read 13 | packages: write 14 | 15 | steps: 16 | - uses: actions/checkout@v4 17 | 18 | - name: Fetch results 19 | id: fetch-results 20 | run: | 21 | ./ci/images/vulnerability-scan/scan.sh | tee /tmp/vulnerability-scan.log 22 | echo "VULNERABILITIES_EXIST=$(tail -1 /tmp/vulnerability-scan.log)" >> $GITHUB_OUTPUT 23 | env: 24 | AUTH_BEARER_TOKEN: ${{ secrets.AUTH_BEARER_TOKEN }} 25 | images: ( 26 | "ci-runner" 27 | "dependencies-update" 28 | "e2e-test-runner" 29 | "quay-upload" 30 | "static-checks" 31 | "vulnerability-scan" 32 | ) 33 | quay_url: "https://quay.io/api/v1/repository/redhat-pipeline-service" 34 | 35 | - name: Check results 36 | id: 
check-results 37 | if: always() 38 | run: | 39 | res=(${{ steps.fetch-results.outputs.VULNERABILITIES_EXIST }}) 40 | if [[ "${res[*]}" =~ "1" ]];then 41 | echo "Vulnerabilities found. Please check the fetch-results step for further details." 42 | exit 1 43 | else 44 | echo "No Vulnerabilities found" 45 | fi 46 | -------------------------------------------------------------------------------- /.github/workflows/update-osp-nightly.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: update-osp-nightly 3 | run-name: Check for OSP nightly and create a PR 4 | on: 5 | schedule: 6 | - cron: '10 1 * * *' 7 | workflow_dispatch: 8 | permissions: {} # drop all permissions; the default triggers codecov failure 9 | jobs: 10 | update-osp-nightly: 11 | runs-on: ubuntu-latest 12 | permissions: 13 | contents: write 14 | pull-requests: write 15 | steps: 16 | - uses: actions/checkout@v4 17 | - name: Update CatalogSource 18 | run: | 19 | tag="v4.14-candidate" 20 | inspected=$(skopeo inspect docker://quay.io/openshift-pipeline/openshift-pipelines-pipelines-operator-bundle-container-index:$tag) 21 | created=$(echo "$inspected" | jq -r '.Created') 22 | echo "DEBUG: Found tag $tag created: $created" 23 | digest=$(echo "$inspected" | jq -r '.Digest') 24 | echo "DEBUG: Tag digest: $digest" 25 | sed -i -E "s/sha256:[0-9a-f]{64}/${digest}/g" operator/gitops/argocd/pipeline-service/openshift-pipelines/osp-nightly-catalog-source.yaml 26 | - name: Create Pull Request 27 | uses: peter-evans/create-pull-request@v6 28 | with: 29 | # the branch name is used by other jobs to open infra-deployments PR 30 | # and skip downgrade tests. 
31 | branch: ci-update-osp-nightly 32 | commit-message: "[new-osp-nightly-build] automated change" 33 | title: "Automated change updating the OSP nightly version" 34 | body: | 35 | Automated change by [update-osp-nightly] 36 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/work/ 2 | .idea/ 3 | -------------------------------------------------------------------------------- /.gitleaks.toml: -------------------------------------------------------------------------------- 1 | [allowlist] 2 | description = "Allowlist" 3 | paths = [ 4 | '''operator/gitops/pac/README.md''', 5 | ] 6 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://gitlab.corp.redhat.com/infosec-public/developer-workbench/tools.git 4 | rev: rh-pre-commit-2.0.0 5 | hooks: 6 | # If you have not run this hook on your system before, it may prompt you to 7 | # log in for patterns, and you will need to try again. 8 | # 9 | # Docs: https://source.redhat.com/departments/it/it-information-security/leaktk/leaktk_components/rh_pre_commit 10 | - id: rh-pre-commit 11 | # Commit-msg attestation 12 | # Make sure to run `rh-multi-pre-commit --hook-type commit-msg install` 13 | # to enable the hook. 
14 | - id: rh-pre-commit.commit-msg 15 | -------------------------------------------------------------------------------- /.shellcheckrc: -------------------------------------------------------------------------------- 1 | ci/images/static-checks/content/config/shellcheck.rc -------------------------------------------------------------------------------- /.tekton/cleanup-existing-clusters.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | name: cleanup-existing-clusters 6 | annotations: 7 | pipelinesascode.tekton.dev/on-event: "[pull_request, push]" 8 | pipelinesascode.tekton.dev/on-target-branch: "[refs/heads/*]" 9 | pipelinesascode.tekton.dev/task: "[git-clone]" 10 | pipelinesascode.tekton.dev/task-1: "[.tekton/tasks/destroy-existing-cluster.yaml]" 11 | pipelinesascode.tekton.dev/max-keep-runs: "5" 12 | spec: 13 | timeouts: 14 | pipeline: "1h0m0s" 15 | params: 16 | - name: repo_url 17 | value: "{{ repo_url }}" 18 | - name: revision 19 | value: "{{ revision }}" 20 | - name: target_branch 21 | value: "{{ target_branch }}" 22 | pipelineSpec: 23 | params: 24 | - name: repo_url 25 | - name: revision 26 | - name: target_branch 27 | workspaces: 28 | - name: shared-workspace 29 | tasks: 30 | - name: fetch-repository 31 | taskRef: 32 | name: git-clone 33 | kind: ClusterTask 34 | workspaces: 35 | - name: output 36 | workspace: shared-workspace 37 | params: 38 | - name: url 39 | value: $(params.repo_url) 40 | - name: revision 41 | value: $(params.revision) 42 | - name: subdirectory 43 | value: source 44 | - name: destroy-existing-clusters 45 | taskRef: 46 | name: destroy-existing-cluster 47 | runAfter: 48 | - "fetch-repository" 49 | params: 50 | - name: target_branch 51 | value: $(params.target_branch) 52 | workspaces: 53 | - name: workdir 54 | workspace: shared-workspace 55 | workspaces: 56 | - name: shared-workspace 57 | volumeClaimTemplate: 58 | spec: 59 | 
accessModes: 60 | - ReadWriteOnce 61 | resources: 62 | requests: 63 | storage: 1Gi 64 | -------------------------------------------------------------------------------- /.tekton/open-infra-deployment-pr-osp-nightly.save: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1beta1 3 | kind: PipelineRun 4 | metadata: 5 | name: open-infra-deployment-pr-osp-nightly 6 | annotations: 7 | pipelinesascode.tekton.dev/on-event: "push" 8 | pipelinesascode.tekton.dev/on-target-branch: "ci-update-osp-nightly" 9 | pipelinesascode.tekton.dev/max-keep-runs: "5" 10 | spec: 11 | params: 12 | - name: git-url 13 | # We override the repo name as it's used as title for the automated PR. 14 | # While testing nightlies we want two lanes of PRs, the regular ones and the OSP nightlies. 15 | value: https://github.com/openshift-pipelines/pipeline-service-test-osp-nightly 16 | - name: revision 17 | value: "{{ revision }}" 18 | - name: infra-deployment-update-script 19 | value: | 20 | sed -i -E 's/[0-9a-f]{40}/{{ revision }}/g' components/pipeline-service/development/kustomization.yaml 21 | # just pin the watcher image, which is the second image ref 22 | sed -i -E '/name: quay.io\/redhat-appstudio\/tekton-results-watcher/{n;s/newTag: {{ revision }}/newTag: bae7851ff584423503af324200f52cd28ca99116/}' components/pipeline-service/development/kustomization.yaml 23 | sed -i -E 's/[0-9a-f]{40}/{{ revision }}/g' components/pipeline-service/staging/base/kustomization.yaml 24 | # just pin the watcher image, which is the second image ref 25 | sed -i -E '/name: quay.io\/redhat-appstudio\/tekton-results-watcher/{n;s/newTag: {{ revision }}/newTag: bae7851ff584423503af324200f52cd28ca99116/}' components/pipeline-service/staging/base/kustomization.yaml 26 | sed -i -E 's/[0-9a-f]{40}/{{ revision }}/g' components/monitoring/grafana/base/dashboards/pipeline-service/kustomization.yaml 27 | - name: slack-webhook-notification-team 28 | value: pipeline 
29 | pipelineSpec: 30 | params: 31 | - description: "Source Repository URL" 32 | name: git-url 33 | type: string 34 | - description: "Revision of the Source Repository" 35 | name: revision 36 | type: string 37 | - default: "" 38 | name: infra-deployment-update-script 39 | - default: "" 40 | name: slack-webhook-notification-team 41 | tasks: 42 | - name: update-infra-repo 43 | params: 44 | - name: ORIGIN_REPO 45 | value: $(params.git-url) 46 | - name: REVISION 47 | value: $(params.revision) 48 | - name: SCRIPT 49 | value: $(params.infra-deployment-update-script) 50 | taskRef: 51 | bundle: quay.io/redhat-appstudio-tekton-catalog/task-update-infra-deployments:0.1 52 | name: update-infra-deployments 53 | finally: 54 | - name: send-slack-webhook-notification 55 | when: 56 | - input: $(params.slack-webhook-notification-team) 57 | operator: notin 58 | values: [""] 59 | - input: $(tasks.status) 60 | operator: in 61 | values: ["Failed"] 62 | params: 63 | - name: message 64 | value: Tekton pipelineRun $(context.pipelineRun.name) failed 65 | - name: key-name 66 | value: $(params.slack-webhook-notification-team) 67 | taskRef: 68 | bundle: quay.io/redhat-appstudio-tekton-catalog/task-slack-webhook-notification:0.1 69 | name: slack-webhook-notification 70 | -------------------------------------------------------------------------------- /.tekton/open-infra-deployment-pr.save: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | name: open-infra-deployment-pr 6 | annotations: 7 | pipelinesascode.tekton.dev/on-event: "push" 8 | pipelinesascode.tekton.dev/on-target-branch: "main" 9 | pipelinesascode.tekton.dev/max-keep-runs: "5" 10 | spec: 11 | params: 12 | - name: git-url 13 | value: "{{ repo_url }}" 14 | - name: revision 15 | value: "{{ revision }}" 16 | - name: infra-deployment-update-script 17 | value: | 18 | sed -i -E 's/[0-9a-f]{40}/{{ revision }}/g' 
components/pipeline-service/development/kustomization.yaml 19 | # just pin the watcher image, which is the second image ref 20 | sed -i -E '/name: quay.io\/redhat-appstudio\/tekton-results-watcher/{n;s/newTag: {{ revision }}/newTag: bae7851ff584423503af324200f52cd28ca99116/}' components/pipeline-service/development/kustomization.yaml 21 | sed -i -E 's/[0-9a-f]{40}/{{ revision }}/g' components/pipeline-service/staging/base/kustomization.yaml 22 | # just pin the watcher image, which is the second image ref 23 | sed -i -E '/name: quay.io\/redhat-appstudio\/tekton-results-watcher/{n;s/newTag: {{ revision }}/newTag: bae7851ff584423503af324200f52cd28ca99116/}' components/pipeline-service/staging/base/kustomization.yaml 24 | sed -i -E 's/[0-9a-f]{40}/{{ revision }}/g' components/monitoring/grafana/base/dashboards/pipeline-service/kustomization.yaml 25 | oc kustomize components/pipeline-service/staging/stone-stg-m01/resources/ > components/pipeline-service/staging/stone-stg-m01/deploy.yaml 26 | oc kustomize components/pipeline-service/staging/stone-stg-rh01/resources/ > components/pipeline-service/staging/stone-stg-rh01/deploy.yaml 27 | oc kustomize components/pipeline-service/staging/stone-stage-p01/resources/ > components/pipeline-service/staging/stone-stage-p01/deploy.yaml 28 | - name: slack-webhook-notification-team 29 | value: pipeline 30 | pipelineSpec: 31 | params: 32 | - description: "Source Repository URL" 33 | name: git-url 34 | type: string 35 | - description: "Revision of the Source Repository" 36 | name: revision 37 | type: string 38 | - default: "" 39 | name: infra-deployment-update-script 40 | - default: "" 41 | name: slack-webhook-notification-team 42 | tasks: 43 | - name: update-infra-repo 44 | params: 45 | - name: ORIGIN_REPO 46 | value: $(params.git-url) 47 | - name: REVISION 48 | value: $(params.revision) 49 | - name: SCRIPT 50 | value: $(params.infra-deployment-update-script) 51 | taskRef: 52 | kind: Task 53 | params: 54 | - name: name 55 | value: 
update-infra-deployments 56 | - name: bundle 57 | value: >- 58 | quay.io/redhat-appstudio-tekton-catalog/task-update-infra-deployments:0.1 59 | - name: kind 60 | value: task 61 | resolver: bundles 62 | finally: 63 | - name: send-slack-webhook-notification 64 | when: 65 | - input: $(params.slack-webhook-notification-team) 66 | operator: notin 67 | values: [""] 68 | - input: $(tasks.status) 69 | operator: in 70 | values: ["Failed"] 71 | params: 72 | - name: message 73 | value: Tekton pipelineRun $(context.pipelineRun.name) failed 74 | - name: key-name 75 | value: $(params.slack-webhook-notification-team) 76 | taskRef: 77 | kind: Task 78 | params: 79 | - name: name 80 | value: slack-webhook-notification 81 | - name: bundle 82 | value: >- 83 | quay.io/redhat-appstudio-tekton-catalog/task-slack-webhook-notification:0.1 84 | - name: kind 85 | value: task 86 | resolver: bundles 87 | -------------------------------------------------------------------------------- /.tekton/pipeline-service-upgrade-test-ocp-414.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | name: test-pipeline-service-upgrade-ocp-414 6 | annotations: 7 | pipelinesascode.tekton.dev/on-event: "[pull_request, push]" 8 | pipelinesascode.tekton.dev/on-target-branch: "[refs/heads/*]" 9 | pipelinesascode.tekton.dev/pipeline: "[.tekton/pipeline/test-pipeline-service-upgrade.yaml]" 10 | pipelinesascode.tekton.dev/task: "[.tekton/tasks/setup-ci-runner-container.yaml]" 11 | pipelinesascode.tekton.dev/task-1: "[.tekton/tasks/deploy-cluster.yaml]" 12 | pipelinesascode.tekton.dev/task-2: "[.tekton/tasks/destroy-cluster.yaml]" 13 | pipelinesascode.tekton.dev/task-3: "[.tekton/tasks/generate-cluster-name.yaml]" 14 | pipelinesascode.tekton.dev/task-4: "[.tekton/tasks/deploy-pipeline-service.yaml]" 15 | pipelinesascode.tekton.dev/task-5: "[.tekton/tasks/test-pipeline-service.yaml]" 16 | 
pipelinesascode.tekton.dev/task-6: "[git-clone]" 17 | pipelinesascode.tekton.dev/max-keep-runs: "5" 18 | spec: 19 | pipelineRef: 20 | name: test-pipeline-service-upgrade 21 | params: 22 | - name: repo_url 23 | value: "{{ repo_url }}" 24 | - name: revision 25 | value: "{{ revision }}" 26 | - name: target_branch 27 | value: "{{ target_branch }}" 28 | - name: source_branch 29 | value: "{{ source_branch }}" 30 | timeouts: 31 | pipeline: "1h30m0s" 32 | workspaces: 33 | - name: shared-workspace 34 | volumeClaimTemplate: 35 | spec: 36 | accessModes: 37 | - ReadWriteOnce 38 | resources: 39 | requests: 40 | storage: 3Gi 41 | -------------------------------------------------------------------------------- /.tekton/pipeline/test-pipeline-service-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: Pipeline 4 | metadata: 5 | name: test-pipeline-service-deployment 6 | spec: 7 | params: 8 | - name: ocp_version 9 | - name: repo_url 10 | - name: revision 11 | - name: target_branch 12 | timeouts: 13 | finally: "0h30m0s" 14 | workspaces: 15 | - name: shared-workspace 16 | tasks: 17 | - name: clone-pipeline-service-repository 18 | taskRef: 19 | name: git-clone 20 | workspaces: 21 | - name: output 22 | workspace: shared-workspace 23 | params: 24 | - name: url 25 | value: $(params.repo_url) 26 | - name: revision 27 | value: $(params.revision) 28 | - name: subdirectory 29 | value: source 30 | - name: generate-cluster-name 31 | taskRef: 32 | name: generate-cluster-name 33 | - name: deploy-cluster 34 | taskRef: 35 | name: deploy-cluster 36 | runAfter: 37 | - "generate-cluster-name" 38 | - "clone-pipeline-service-repository" 39 | params: 40 | - name: cluster-name 41 | value: "$(tasks.generate-cluster-name.results.cluster-name)" 42 | - name: target_branch 43 | value: $(params.target_branch) 44 | - name: ocp_version 45 | value: $(params.ocp_version) 46 | workspaces: 47 | - name: workdir 48 | 
workspace: shared-workspace 49 | - name: setup-ci-runner-container 50 | taskRef: 51 | name: setup-ci-runner-container 52 | runAfter: 53 | - "deploy-cluster" 54 | retries: 2 55 | params: 56 | - name: target_branch 57 | value: $(params.target_branch) 58 | workspaces: 59 | - name: workdir 60 | workspace: shared-workspace 61 | - name: deploy-pipeline-service 62 | taskRef: 63 | name: deploy-pipeline-service 64 | runAfter: 65 | - "setup-ci-runner-container" 66 | workspaces: 67 | - name: workdir 68 | workspace: shared-workspace 69 | params: 70 | - name: repo_url 71 | value: $(params.repo_url) 72 | - name: target_branch 73 | value: $(params.target_branch) 74 | - name: test_branch 75 | value: $(params.revision) 76 | - name: test-pipeline-service 77 | taskRef: 78 | name: test-pipeline-service 79 | runAfter: 80 | - "deploy-pipeline-service" 81 | params: 82 | - name: target_branch 83 | value: $(params.target_branch) 84 | workspaces: 85 | - name: workdir 86 | workspace: shared-workspace 87 | finally: 88 | - name: destroy-cluster 89 | taskRef: 90 | name: destroy-cluster 91 | when: 92 | - input: "$(tasks.deploy-cluster.status)" 93 | operator: notin 94 | values: ["None", "Failed"] 95 | params: 96 | - name: cluster-name 97 | value: "$(tasks.generate-cluster-name.results.cluster-name)" 98 | - name: target_branch 99 | value: $(params.target_branch) 100 | workspaces: 101 | - name: workdir 102 | workspace: shared-workspace 103 | -------------------------------------------------------------------------------- /.tekton/pipeline/test-rhtap-e2e.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: Pipeline 4 | metadata: 5 | name: test-rhtap-e2e 6 | spec: 7 | timeouts: 8 | finally: "0h30m0s" 9 | params: 10 | - name: repo_url 11 | - name: revision 12 | - name: repo_owner 13 | - name: target_branch 14 | workspaces: 15 | - name: workdir 16 | tasks: 17 | - name: clone-pipeline-service-repository 18 | taskRef: 19 | 
name: git-clone 20 | workspaces: 21 | - name: output 22 | workspace: workdir 23 | params: 24 | - name: url 25 | value: $(params.repo_url) 26 | - name: revision 27 | value: $(params.revision) 28 | - name: generate-cluster-name 29 | runAfter: 30 | - "clone-pipeline-service-repository" 31 | taskRef: 32 | name: generate-cluster-name 33 | - name: deploy-cluster 34 | runAfter: 35 | - "generate-cluster-name" 36 | params: 37 | - name: cluster-name 38 | value: "$(tasks.generate-cluster-name.results.cluster-name)" 39 | - name: target_branch 40 | value: $(params.target_branch) 41 | workspaces: 42 | - name: workdir 43 | workspace: workdir 44 | taskRef: 45 | name: deploy-cluster 46 | - name: run-rhtap-e2e 47 | runAfter: 48 | - "deploy-cluster" 49 | workspaces: 50 | - name: workdir 51 | workspace: workdir 52 | params: 53 | - name: repo_url 54 | value: $(params.repo_url) 55 | - name: revision 56 | value: $(params.revision) 57 | - name: repo_owner 58 | value: $(params.repo_owner) 59 | - name: target_branch 60 | value: $(params.target_branch) 61 | taskRef: 62 | name: run-rhtap-e2e 63 | finally: 64 | - name: destroy-cluster 65 | taskRef: 66 | name: destroy-cluster 67 | when: 68 | - input: "$(tasks.deploy-cluster.status)" 69 | operator: notin 70 | values: ["None"] 71 | params: 72 | - name: cluster-name 73 | value: "$(tasks.generate-cluster-name.results.cluster-name)" 74 | - name: target_branch 75 | value: $(params.target_branch) 76 | workspaces: 77 | - name: workdir 78 | workspace: workdir 79 | -------------------------------------------------------------------------------- /.tekton/pipeline/update-repository.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: Pipeline 4 | metadata: 5 | name: update-repository 6 | spec: 7 | params: 8 | - name: repo_name 9 | - name: repo_owner 10 | - name: repo_url 11 | - name: source_branch 12 | workspaces: 13 | - name: basic-auth 14 | - name: 
workdir 15 | tasks: 16 | - name: update-binaries 17 | taskRef: 18 | name: update-repository 19 | params: 20 | - name: COMMIT_BRANCH 21 | value: "robot/$(params.source_branch)/update_binaries" 22 | - name: TARGET_GH_NAME 23 | value: $(params.repo_name) 24 | - name: TARGET_GH_OWNER 25 | value: $(params.repo_owner) 26 | - name: TARGET_GH_URL 27 | value: $(params.repo_url) 28 | - name: TARGET_BRANCH 29 | value: $(params.source_branch) 30 | - name: SCRIPT_IMAGE 31 | value: quay.io/redhat-pipeline-service/dependencies-update:$(params.source_branch) 32 | - name: SCRIPT_PATH 33 | value: ./developer/images/dependencies-update/hack/bin/update.sh 34 | - name: SCRIPT_ARGS 35 | value: 36 | - --task 37 | - update_binaries 38 | - --workspace_dir 39 | - "." 40 | workspaces: 41 | - name: basic-auth 42 | workspace: basic-auth 43 | - name: workdir 44 | workspace: workdir 45 | - name: update-images 46 | runAfter: 47 | - update-binaries 48 | taskRef: 49 | name: update-repository 50 | params: 51 | - name: COMMIT_BRANCH 52 | value: "robot/$(params.source_branch)/update_dockerfiles" 53 | - name: TARGET_GH_NAME 54 | value: $(params.repo_name) 55 | - name: TARGET_GH_OWNER 56 | value: $(params.repo_owner) 57 | - name: TARGET_GH_URL 58 | value: $(params.repo_url) 59 | - name: TARGET_BRANCH 60 | value: $(params.source_branch) 61 | - name: SCRIPT_IMAGE 62 | value: quay.io/redhat-pipeline-service/dependencies-update:$(params.source_branch) 63 | - name: SCRIPT_PATH 64 | value: ./developer/images/dependencies-update/hack/bin/update.sh 65 | - name: SCRIPT_ARGS 66 | value: 67 | - --task 68 | - update_dockerfiles_base_images_sha 69 | - --workspace_dir 70 | - "." 
71 | workspaces: 72 | - name: basic-auth 73 | workspace: basic-auth 74 | - name: workdir 75 | workspace: workdir 76 | -------------------------------------------------------------------------------- /.tekton/tasks/deploy-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: Task 4 | metadata: 5 | name: deploy-cluster 6 | spec: 7 | params: 8 | - name: ocp_version 9 | description: ocp cluster version that you want to provision 10 | default: "4.14" 11 | - name: region 12 | description: ocp cluster region where you want to provision 13 | default: "us-east-1" 14 | - name: cluster-name 15 | - name: target_branch 16 | workspaces: 17 | - name: workdir 18 | steps: 19 | - name: install-cluster 20 | image: quay.io/redhat-pipeline-service/ci-runner:$(params.target_branch) 21 | imagePullPolicy: Always 22 | env: 23 | - name: CLUSTER_NAME 24 | value: "$(params.cluster-name)" 25 | - name: KUBECONFIG_DIR 26 | value: "$(workspaces.workdir.path)/.kube" 27 | - name: REGION 28 | value: "$(params.region)" 29 | - name: OCP_VERSION 30 | value: "$(params.ocp_version)" 31 | - name: ROSA_TOKEN 32 | valueFrom: 33 | secretKeyRef: 34 | name: plnsvc-ci-secret 35 | key: "PLNSVC_ROSA_TOKEN" 36 | - name: AWS_ACCESS_KEY_ID 37 | valueFrom: 38 | secretKeyRef: 39 | name: plnsvc-ci-secret 40 | key: "PLNSVC_AWS_KEY_ID" 41 | - name: AWS_SECRET_ACCESS_KEY 42 | valueFrom: 43 | secretKeyRef: 44 | name: plnsvc-ci-secret 45 | key: "PLNSVC_AWS_KEY" 46 | - name: AWS_ACCOUNT_ID 47 | valueFrom: 48 | secretKeyRef: 49 | name: plnsvc-ci-secret 50 | key: "PLNSVC_AWS_ACCOUNT_ID" 51 | workingDir: "$(workspaces.workdir.path)/source" 52 | command: 53 | - ci/images/ci-runner/hack/bin/deploy-cluster.sh 54 | -------------------------------------------------------------------------------- /.tekton/tasks/deploy-pipeline-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 
tekton.dev/v1 3 | kind: Task 4 | metadata: 5 | name: deploy-pipeline-service 6 | spec: 7 | params: 8 | - name: repo_url 9 | - name: target_branch 10 | - name: test_branch 11 | workspaces: 12 | - name: workdir 13 | steps: 14 | - name: run-plnsvc-setup 15 | image: quay.io/redhat-pipeline-service/ci-runner:$(params.target_branch) 16 | resources: 17 | requests: 18 | memory: 500Mi 19 | cpu: 300m 20 | workingDir: "$(workspaces.workdir.path)/source" 21 | env: 22 | - name: KUBECONFIG 23 | value: "$(workspaces.workdir.path)/.kube/config" 24 | - name: REPO_URL 25 | value: $(params.repo_url) 26 | - name: REPO_REVISION 27 | value: $(params.test_branch) 28 | command: 29 | - ci/images/ci-runner/hack/bin/run-plnsvc-setup.sh 30 | -------------------------------------------------------------------------------- /.tekton/tasks/destroy-cluster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: Task 4 | metadata: 5 | name: destroy-cluster 6 | spec: 7 | params: 8 | - name: cluster-name 9 | - name: region 10 | description: ocp cluster region where you want to provision 11 | default: "us-east-1" 12 | - name: target_branch 13 | workspaces: 14 | - name: workdir 15 | steps: 16 | - name: destroy 17 | image: quay.io/redhat-pipeline-service/ci-runner:$(params.target_branch) 18 | env: 19 | - name: CLUSTER_NAME 20 | value: "$(params.cluster-name)" 21 | - name: ROSA_TOKEN 22 | valueFrom: 23 | secretKeyRef: 24 | name: plnsvc-ci-secret 25 | key: "PLNSVC_ROSA_TOKEN" 26 | - name: AWS_ACCESS_KEY_ID 27 | valueFrom: 28 | secretKeyRef: 29 | name: plnsvc-ci-secret 30 | key: "PLNSVC_AWS_KEY_ID" 31 | - name: AWS_SECRET_ACCESS_KEY 32 | valueFrom: 33 | secretKeyRef: 34 | name: plnsvc-ci-secret 35 | key: "PLNSVC_AWS_KEY" 36 | - name: KUBECONFIG_DIR 37 | value: "$(workspaces.workdir.path)/.kube" 38 | - name: REGION 39 | value: "$(params.region)" 40 | workingDir: "$(workspaces.workdir.path)/source" 41 | command: 42 | - 
---
apiVersion: tekton.dev/v1
kind: Task
metadata:
  name: destroy-existing-cluster
spec:
  params:
    - name: target_branch
    - name: region
      description: ocp cluster region where you want to provision
      # Quoted for consistency with the sibling tasks in this directory
      # (deploy-cluster.yaml and destroy-cluster.yaml both quote this default).
      default: "us-east-1"
  workspaces:
    - name: workdir
  steps:
    # Runs the cleanup script that destroys expired CI clusters on ROSA.
    - name: destroy
      image: quay.io/redhat-pipeline-service/ci-runner:$(params.target_branch)
      env:
        - name: ROSA_TOKEN
          valueFrom:
            secretKeyRef:
              name: plnsvc-ci-secret
              key: "PLNSVC_ROSA_TOKEN"
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              name: plnsvc-ci-secret
              key: "PLNSVC_AWS_KEY_ID"
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              name: plnsvc-ci-secret
              key: "PLNSVC_AWS_KEY"
        - name: REGION
          value: "$(params.region)"
      workingDir: "$(workspaces.workdir.path)/source"
      command:
        - ci/images/ci-runner/hack/bin/cleanup-expired-clusters.sh
---
apiVersion: tekton.dev/v1
kind: Task
metadata:
  name: run-rhtap-e2e
spec:
  # NOTE: removed stray 'kind: ClusterTask' entry that sat inside 'spec' —
  # 'kind' is not a valid Task.spec field and was a leftover from a
  # ClusterTask-to-Task conversion; the resource kind is declared above.
  workspaces:
    - name: workdir
  params:
    - name: repo_url
    - name: revision
    - name: repo_owner
    - name: target_branch
  steps:
    - name: run-e2e-test
      image: quay.io/redhat-pipeline-service/e2e-test-runner:$(params.target_branch)
      resources:
        limits:
          memory: 4Gi
        requests:
          memory: 500Mi
          cpu: 300m
      env:
        - name: KUBECONFIG
          value: "$(workspaces.workdir.path)/kubeconfig"
        - name: PIPELINE_PR_OWNER
          value: $(params.repo_owner)
        - name: PIPELINE_PR_SHA
          value: $(params.revision)
        - name: BW_CLIENTID
          valueFrom:
            secretKeyRef:
              name: hypershift-bitwarden
              key: "BW_CLIENTID"
        - name: BW_CLIENTSECRET
          valueFrom:
            secretKeyRef:
              name: hypershift-bitwarden
              key: "BW_CLIENTSECRET"
        - name: BW_PASSWORD
          valueFrom:
            secretKeyRef:
              name: hypershift-bitwarden
              key: "BW_PASSWORD"
      workingDir: "$(workspaces.workdir.path)/source"
      command:
        - ci/images/e2e-test-runner/run-e2e-test.sh
value: "$(workspaces.workdir.path)/.kube/config" 21 | workingDir: "$(workspaces.workdir.path)/source" 22 | command: 23 | - ci/images/ci-runner/hack/bin/create-ci-runner-container.sh 24 | - name: copy-plnsvc-code 25 | image: quay.io/redhat-pipeline-service/ci-runner:$(params.target_branch) 26 | resources: 27 | requests: 28 | memory: 500Mi 29 | cpu: 300m 30 | env: 31 | - name: KUBECONFIG 32 | value: "$(workspaces.workdir.path)/.kube/config" 33 | workingDir: "$(workspaces.workdir.path)/source" 34 | command: 35 | - ci/images/ci-runner/hack/bin/copy-plnsvc-code.sh 36 | -------------------------------------------------------------------------------- /.tekton/tasks/test-pipeline-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: Task 4 | metadata: 5 | name: test-pipeline-service 6 | spec: 7 | params: 8 | - name: target_branch 9 | workspaces: 10 | - name: workdir 11 | steps: 12 | - name: run-tests 13 | image: quay.io/redhat-pipeline-service/ci-runner:$(params.target_branch) 14 | resources: 15 | requests: 16 | memory: 500Mi 17 | cpu: 300m 18 | workingDir: "$(workspaces.workdir.path)/source" 19 | env: 20 | - name: KUBECONFIG 21 | value: "$(workspaces.workdir.path)/.kube/config" 22 | command: 23 | - ci/images/ci-runner/hack/bin/run-plnsvc-test.sh 24 | -------------------------------------------------------------------------------- /.tekton/test-docker-images-build.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | name: test-docker-images-build 6 | annotations: 7 | pipelinesascode.tekton.dev/on-event: "[pull_request, push]" 8 | pipelinesascode.tekton.dev/on-target-branch: "[refs/heads/*]" 9 | pipelinesascode.tekton.dev/task: "[git-clone, buildah]" 10 | pipelinesascode.tekton.dev/max-keep-runs: "5" 11 | spec: 12 | timeouts: 13 | pipeline: "1h0m0s" 14 | params: 15 | - name: 
repo_url 16 | value: "{{ repo_url }}" 17 | - name: revision 18 | value: "{{ revision }}" 19 | pipelineSpec: 20 | params: 21 | - name: repo_url 22 | - name: revision 23 | workspaces: 24 | - name: shared-workspace 25 | tasks: 26 | - name: fetch-repository 27 | taskRef: 28 | name: git-clone 29 | workspaces: 30 | - name: output 31 | workspace: shared-workspace 32 | params: 33 | - name: url 34 | value: $(params.repo_url) 35 | - name: revision 36 | value: $(params.revision) 37 | - name: subdirectory 38 | value: source 39 | - name: build-images 40 | runAfter: 41 | - fetch-repository 42 | workspaces: 43 | - name: workdir 44 | workspace: shared-workspace 45 | taskSpec: 46 | workspaces: 47 | - name: workdir 48 | volumes: 49 | - emptyDir: {} 50 | name: varlibcontainers 51 | steps: 52 | - name: test-build-images 53 | image: registry.redhat.io/ubi9/buildah@sha256:32dba51af7790d4f067ff0bc37e46a2f583f093106176a4e48573623d144a9dc 54 | imagePullPolicy: Always 55 | securityContext: 56 | capabilities: 57 | add: 58 | - SETFCAP 59 | volumeMounts: 60 | - mountPath: /var/lib/containers 61 | name: varlibcontainers 62 | workingDir: $(workspaces.workdir.path)/source 63 | script: | 64 | developer/hack/build-images-buildah.sh --delete 65 | workspaces: 66 | - name: shared-workspace 67 | volumeClaimTemplate: 68 | spec: 69 | accessModes: 70 | - ReadWriteOnce 71 | resources: 72 | requests: 73 | storage: 1Gi 74 | -------------------------------------------------------------------------------- /.tekton/test-pipeline-service-deployment-ocp-414.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | name: test-pipeline-service-deployment-ocp-414 6 | annotations: 7 | pipelinesascode.tekton.dev/on-event: "[pull_request, push]" 8 | pipelinesascode.tekton.dev/on-target-branch: "[refs/heads/*]" 9 | pipelinesascode.tekton.dev/pipeline: "[.tekton/pipeline/test-pipeline-service-deployment.yaml]" 10 | 
pipelinesascode.tekton.dev/task: "[.tekton/tasks/setup-ci-runner-container.yaml]" 11 | pipelinesascode.tekton.dev/task-1: "[.tekton/tasks/deploy-cluster.yaml]" 12 | pipelinesascode.tekton.dev/task-2: "[.tekton/tasks/destroy-cluster.yaml]" 13 | pipelinesascode.tekton.dev/task-3: "[.tekton/tasks/generate-cluster-name.yaml]" 14 | pipelinesascode.tekton.dev/task-4: "[.tekton/tasks/deploy-pipeline-service.yaml]" 15 | pipelinesascode.tekton.dev/task-5: "[.tekton/tasks/test-pipeline-service.yaml]" 16 | pipelinesascode.tekton.dev/task-6: "[git-clone]" 17 | pipelinesascode.tekton.dev/max-keep-runs: "5" 18 | spec: 19 | pipelineRef: 20 | name: test-pipeline-service-deployment 21 | params: 22 | - name: ocp_version 23 | value: "4.14" 24 | - name: repo_url 25 | value: "{{ repo_url }}" 26 | - name: revision 27 | value: "{{ revision }}" 28 | - name: target_branch 29 | value: "{{ target_branch }}" 30 | timeouts: 31 | pipeline: "1h30m0s" 32 | workspaces: 33 | - name: shared-workspace 34 | volumeClaimTemplate: 35 | spec: 36 | accessModes: 37 | - ReadWriteOnce 38 | resources: 39 | requests: 40 | storage: 3Gi 41 | -------------------------------------------------------------------------------- /.tekton/test-rhtap-e2e.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | name: test-rhtap-e2e 6 | annotations: 7 | pipelinesascode.tekton.dev/on-event: "[push]" 8 | pipelinesascode.tekton.dev/on-target-branch: "main" 9 | pipelinesascode.tekton.dev/pipeline: "[.tekton/pipeline/test-rhtap-e2e.yaml]" 10 | pipelinesascode.tekton.dev/task-1: "[.tekton/tasks/deploy-cluster.yaml]" 11 | pipelinesascode.tekton.dev/task-2: "[.tekton/tasks/destroy-cluster.yaml]" 12 | pipelinesascode.tekton.dev/task-3: "[.tekton/tasks/generate-cluster-name.yaml]" 13 | pipelinesascode.tekton.dev/task-4: "[.tekton/tasks/run-rhtap-e2e.yaml]" 14 | pipelinesascode.tekton.dev/task-5: "[git-clone]" 15 | 
pipelinesascode.tekton.dev/max-keep-runs: "5" 16 | spec: 17 | pipelineRef: 18 | name: test-rhtap-e2e 19 | params: 20 | - name: ocp_version 21 | value: "4.12.36" 22 | - name: repo_url 23 | value: "{{ repo_url }}" 24 | - name: revision 25 | value: "{{ revision }}" 26 | - name: repo_owner 27 | value: "{{ repo_owner }}" 28 | - name: target_branch 29 | value: "{{ target_branch }}" 30 | timeouts: 31 | pipeline: "1h30m0s" 32 | workspaces: 33 | - name: shared-workspace 34 | volumeClaimTemplate: 35 | spec: 36 | accessModes: 37 | - ReadWriteOnce 38 | resources: 39 | requests: 40 | storage: 3Gi 41 | -------------------------------------------------------------------------------- /.tekton/update-dependencies.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | annotations: 6 | pipelinesascode.tekton.dev/on-event: "[push]" 7 | pipelinesascode.tekton.dev/on-target-branch: "[main]" 8 | pipelinesascode.tekton.dev/pipeline: "[.tekton/pipeline/update-repository.yaml]" 9 | pipelinesascode.tekton.dev/task-1: "[.tekton/tasks/update-repository.yaml]" 10 | pipelinesascode.tekton.dev/max-keep-runs: "5" 11 | name: update-pipeline-service-dependencies 12 | spec: 13 | timeouts: 14 | pipeline: "0h20m0s" 15 | tasks: "0h5m0s" 16 | pipelineRef: 17 | name: update-repository 18 | params: 19 | - name: repo_name 20 | value: "{{ repo_name }}" 21 | - name: repo_owner 22 | value: "{{ repo_owner }}" 23 | - name: repo_url 24 | value: "{{ repo_url }}" 25 | - name: source_branch 26 | value: "{{ source_branch }}" 27 | workspaces: 28 | - name: basic-auth 29 | workspace: basic-auth 30 | secret: 31 | secretName: "{{ git_auth_secret }}" 32 | - name: workdir 33 | volumeClaimTemplate: 34 | spec: 35 | accessModes: 36 | - ReadWriteOnce 37 | resources: 38 | requests: 39 | storage: 1Gi 40 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: 
-------------------------------------------------------------------------------- 1 | # Contributing to the project 2 | 3 | ## Development environment 4 | 5 | You can see the required dependencies in [DEPENDENCIES.md](./DEPENDENCIES.md). 6 | 7 | You have various options to setup your environment: 8 | * local environment: the [install.sh](developer/images/devenv/install.sh) can be used to 9 | install all dependencies but be careful as it will impact your system configuration by 10 | installing/configuring packages locally. 11 | * running in a container: use [run.sh](developer/images/devenv/run.sh) to spawn a container 12 | and be dropped in a shell. 13 | * VS Code in a container: you can use the `Remote Development` extension 14 | (`ms-vscode-remote.vscode-remote-extensionpack`), and VS Code will use the content of 15 | [.devcontainer](.devcontainer) to spawn a container and drop you in the development 16 | environment. This will require an action on your side when opening the project, so look 17 | out for the `Reopen in container` notification. 18 | 19 | ## PR process 20 | 21 | * When you open a PR, add a few reviewers. If the PR solves a GitHub issue, make sure 22 | that the contributor who opened the issue is one of the reviewers. 23 | * If you do not see any progress on the PR over a couple of days, feel free to put a 24 | comment tagging one or two users asking them to take the time to review your change. 25 | * The reviewer will perform the code review. If they are satisfied with the change, and 26 | they feel like the change does not require a second pair of eyes, they will merge the 27 | PR. 28 | * The PR may be large enough or import enough to require a second opinion. In that case, 29 | the 1st reviewer will put a comment asking for an additional review. In that case, the 30 | last reviewer to approve the PR is responsible for merging it. 
31 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - adambkaplan 3 | - avinal 4 | - enarha 5 | - gabemontero 6 | - ramessesii2 7 | - Roming22 8 | - sayan-biswas 9 | - xinredhat 10 | 11 | approvers: 12 | - adambkaplan 13 | - avinal 14 | - enarha 15 | - gabemontero 16 | - ramessesii2 17 | - Roming22 18 | - sayan-biswas 19 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Pipeline Service 2 | 3 | Pipeline Service provides a SaaS for pipelines. It leverages: 4 | 5 | - Kubernetes / OpenShift for the compute 6 | - Tekton Pipelines, Results and friends for the core of the service 7 | - OpenShift GitOps / Argo CD, Pipelines as Code for managing the infrastructure 8 | 9 | 10 | Tekton and Kubernetes provide a great infrastructure for building pipelines. They come however with some limitations. 11 | 12 | - Multi-tenancy: Kubernetes provides a level of multi-tenancy. However, this does not extend to cluster scoped resources. CustomResourceDefinitions (CRD) are extensively used for extending the Kubernetes API, following [the operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/). CRDs are cluster scoped. This induces a coupling between the operator version provided by the platform and what you can use as a tenant. The control plane is also shared between tenants. 13 | - Scalability: Kubernetes has made it easy to distribute the load onto many servers and scalability at cloud scale more approachable. Like everything, its control plane has however its limits. 14 | - Availability and geo-redundancy: Kubernetes control plane is based on an etcd cluster, which is sensible to latency between its members. This restricts what can be done in terms of geographical distribution. 
15 | 16 | Work is in progress in order to solve these challenges. 17 | **KCP related work was discontinued and can be found in [kcp](https://github.com/openshift-pipelines/pipeline-service/tree/kcp) branch** 18 | 19 | ## How do I start? 20 | 21 | ### Running in Kubernetes or OpenShift 22 | 23 | You can deploy Pipeline Service on your OpenShift cluster with the [dev_setup.sh](./developer/openshift/dev_setup.sh) script in developer folder. 24 | 25 | More info [here](./developer/openshift/README.md). 26 | 27 | ## Running locally 28 | 29 | See [the development guide](./developer/docs/DEVELOPMENT.md) for instructions on how to set up a local development environment. 30 | -------------------------------------------------------------------------------- /ci/README.md: -------------------------------------------------------------------------------- 1 | # Documentation for continuous integration 2 | 3 | This folder holds all the files required for the Continuous Integration (CI) process. 4 | This process help with testing and delivering the service, and is responsible for 5 | the quality of the deliverables. 6 | 7 | See the [continuous integration documentation](./docs/) for more information. 8 | -------------------------------------------------------------------------------- /ci/docs/static_checks.md: -------------------------------------------------------------------------------- 1 | # Static checking 2 | 3 | Various tools are run against the repository to validate formatting, detect errors and 4 | conformance to known best practices. 5 | 6 | Those tools are run as part of the CI but can be run locally by the developer before 7 | submitting a change. 
8 | -------------------------------------------------------------------------------- /ci/hack/plnsvc_upgrade_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | #quit if exit status of any cmd is a non-zero value 4 | set -o errexit 5 | set -o nounset 6 | set -o pipefail 7 | 8 | usage() { 9 | echo " 10 | Usage: 11 | $0 [options] 12 | 13 | Run Pipeline Service upgrade tests on the cluster referenced by KUBECONFIG. 14 | 15 | Using the 'main' branch as the baseline, it will deploy Pipeline Service, 16 | upgrade to your current branch, and downgrade back to 'main', testing the 17 | service at every step along the way. 18 | 19 | Optional arguments: 20 | -k, --kubeconfig KUBECONFIG 21 | kubeconfig to the cluster to test. 22 | The current context will be used. 23 | Default value: \$KUBECONFIG" 24 | # -f, --from VERSION 25 | # Branch, SHA or tag of the base version. 26 | # Default: main. 27 | # -t, --to VERSION 28 | # Branch, SHA or tag of the new version. 29 | # Default: Current commit. 30 | echo "\ 31 | -d, --debug 32 | Activate tracing/debug mode. 33 | -h, --help 34 | Display this message. 
35 | 36 | Example: 37 | $0 --kubeconfig mykubeconfig.yaml 38 | " 39 | } 40 | 41 | parse_args() { 42 | KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}" 43 | FROM_VERSION="main" 44 | TO_VERSION=$(git branch --show-current) 45 | while [[ $# -gt 0 ]]; do 46 | case $1 in 47 | -k | --kubeconfig) 48 | shift 49 | KUBECONFIG="$1" 50 | ;; 51 | # -f | --from) 52 | # shift 53 | # FROM_VERSION="$1" 54 | # ;; 55 | # -t | --to) 56 | # shift 57 | # TO_VERSION="$1" 58 | # ;; 59 | -d | --debug) 60 | DEBUG="--debug" 61 | set -x 62 | ;; 63 | -h | --help) 64 | usage 65 | exit 0 66 | ;; 67 | *) 68 | echo "Unknown argument: $1" 69 | usage 70 | exit 1 71 | ;; 72 | esac 73 | shift 74 | done 75 | DEBUG="${DEBUG:-}" 76 | } 77 | 78 | init() { 79 | SCRIPT_DIR=$( 80 | cd "$(dirname "$0")" >/dev/null 81 | pwd 82 | ) 83 | PROJECT_DIR=$( 84 | cd "$SCRIPT_DIR/../.." >/dev/null 85 | pwd 86 | ) 87 | export KUBECONFIG 88 | } 89 | 90 | run_for(){ 91 | OPTS="" 92 | case "$1" in 93 | from) 94 | VERSION="$FROM_VERSION" 95 | ;; 96 | to) 97 | VERSION="$TO_VERSION" 98 | OPTS="--use-current-branch" 99 | ;; 100 | esac 101 | echo 102 | git checkout "$VERSION" 103 | echo "[Deploying $VERSION]" 104 | # shellcheck disable=SC2086 105 | "$PROJECT_DIR/developer/openshift/dev_setup.sh" $OPTS $DEBUG 106 | 107 | echo 108 | echo "[Testing $VERSION]" 109 | # shellcheck disable=SC2086 110 | "$PROJECT_DIR/operator/test/test.sh" $DEBUG 111 | } 112 | 113 | on_exit(){ 114 | git checkout -f "$TO_VERSION" 115 | [ "$STASH" == "1" ] || git stash pop 116 | } 117 | 118 | main() { 119 | parse_args "$@" 120 | init 121 | 122 | trap on_exit EXIT 123 | 124 | STASH="$(git stash | grep -c "No local changes to save" || true)" 125 | 126 | run_for "from" 127 | run_for "to" 128 | run_for "from" 129 | } 130 | 131 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 132 | main "$@" 133 | fi 134 | -------------------------------------------------------------------------------- /ci/hack/rosa_cluster_provision.sh: 
prechecks() {
    # Validate that every mandatory argument was supplied before doing any work.
    # Each variable is expanded with ${VAR:-} so that the script's
    # 'set -o nounset' does not abort with an "unbound variable" error before
    # the friendly usage message can be printed (the previous bare "$VAR"
    # expansions crashed here whenever an option was omitted).
    if [[ -z "${ROSA_TOKEN:-}" ]]; then
        printf "ROSA API authentication token is not set\n\n"
        usage
        exit 1
    fi
    if [[ -z "${AWS_ACCESS_KEY_ID:-}" ]]; then
        printf "AWS access key id is not set\n\n"
        usage
        exit 1
    fi
    if [[ -z "${AWS_SECRET_ACCESS_KEY:-}" ]]; then
        printf "AWS secret access key is not set\n\n"
        usage
        exit 1
    fi
    if [[ -z "${AWS_REGION:-}" ]]; then
        printf "AWS region is not set\n\n"
        usage
        exit 1
    fi
    if [[ -z "${CLUSTER_NAME:-}" ]]; then
        printf "Cluster name is not set\n\n"
        usage
        exit 1
    fi
}
is_cluster_expired() {
    # Print "true" if the cluster was created more than 2 hours (7200s) ago,
    # "false" otherwise.
    cluster_name=$1

    # jq -e exits non-zero when select() yields no output, i.e. when the
    # cluster is younger than 2 hours.
    if ! rosa describe cluster --region "$REGION" --cluster="$cluster_name" -o json | \
        jq -e 'select((.creation_timestamp | fromdateiso8601) < (now - 7200))' >/dev/null; then
        echo "false"
        # Bug fix: without this 'return' the function fell through and printed
        # "true" as well, so non-expired clusters produced "false\ntrue". The
        # caller's [[ "$is_expired" == "true" ]] check only worked by accident.
        return
    fi
    echo "true"
}
ci-runner container, retrying ..." 13 | sleep 5 14 | done 15 | 16 | echo "Copy sidecar sources to the ci-runner container" 17 | for _ in {1..10}; do 18 | if kubectl cp "./ci/images/ci-runner/hack/sidecar" "default/ci-runner:/workspace/sidecar"; then 19 | break 20 | fi 21 | echo "Failed to copy sidecar sources to the ci-runner container, retrying ..." 22 | sleep 5 23 | done 24 | 25 | echo "Copy new cluster's kubeconfig to the ci-runner container" 26 | for _ in {1..10}; do 27 | if kubectl cp "$KUBECONFIG" "default/ci-runner:/kubeconfig"; then 28 | break 29 | fi 30 | echo "Failed to copy cluster's kubeconfig to the ci-runner container, retrying ..." 31 | sleep 5 32 | done 33 | -------------------------------------------------------------------------------- /ci/images/ci-runner/hack/bin/create-ci-runner-container.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o nounset 4 | set -o pipefail 5 | set -x 6 | 7 | MANIFEST_DIR=$( 8 | cd "$(dirname "$0")/../manifests"; 9 | pwd; 10 | ) 11 | kubectl -n default apply -k "$MANIFEST_DIR/sidecar" 12 | 13 | if ! 
kubectl -n default wait pod/ci-runner --for=condition=Ready --timeout=180s; then 14 | echo "ci-runner is not ready" >&2 15 | kubectl -n default describe pod/ci-runner >&2 16 | exit 1 17 | fi 18 | 19 | echo 20 | echo "KUBECONFIG:" 21 | cat "$KUBECONFIG" 22 | echo 23 | echo "Connect to the ci-runner with: kubectl exec -n default --stdin --tty ci-runner -- bash" 24 | echo 25 | -------------------------------------------------------------------------------- /ci/images/ci-runner/hack/bin/destroy-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o nounset 4 | set -o pipefail 5 | set -x 6 | 7 | SCRIPT_DIR="$( 8 | cd "$(dirname "$0")" >/dev/null 9 | pwd 10 | )" 11 | 12 | # Give developers 15mins to connect to a pod and remove the file 13 | # if they want to investigate the failure 14 | failure_file="$PWD/destroy-cluster.txt" 15 | if [ -e "$failure_file" ]; then 16 | echo "Failure detected." 17 | echo "Delete '$failure_file' within 15 minutes to keep the cluster alive for investigation." 18 | echo 19 | echo "KUBECONFIG:" 20 | cat "$KUBECONFIG_DIR/config" 21 | echo 22 | echo "Connect to the ci-runner with: kubectl exec -n default --stdin --tty ci-runner -- bash" 23 | echo 24 | 25 | sleep 900 26 | if [ -e "$failure_file" ]; then 27 | echo "Failure is not being investigated, cluster will be destroyed." 28 | else 29 | echo "Failure under investigation, cluster will not be destroyed." 30 | exit 1 31 | fi 32 | fi 33 | 34 | # shellcheck source=ci/images/ci-runner/hack/bin/utils.sh 35 | source "$SCRIPT_DIR/utils.sh" 36 | 37 | if [[ -n "$CLUSTER_NAME" ]]; then 38 | echo "[$(date +"%Y/%m/%d %H:%M:%S")] Started to destroy cluster [$CLUSTER_NAME]..." 
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
set -x

# Create a file that will prevent the cluster deletion in case tests are failing
touch "$PWD/destroy-cluster.txt"

echo "Execute dev_setup.sh script to set up pipeline-service ..."
# If the following command fails, it retries up to 3 times with a 5 second
# sleep between attempts.
setup_succeeded=""
for _ in {1..3}; do
  if kubectl -n default exec pod/ci-runner -- \
    sh -c "/workspace/sidecar/bin/plnsvc_setup.sh $REPO_URL $REPO_REVISION"; then
    setup_succeeded="1"
    break
  fi
  echo "Failed to execute dev_setup.sh script, retrying ..."
  sleep 5
done

# Bug fix: the original loop exited 0 even when all 3 attempts failed, letting
# the pipeline continue (and run tests) against a broken deployment. Fail the
# task explicitly if setup never succeeded.
if [ -z "$setup_succeeded" ]; then
  echo "Failed to set up pipeline-service after 3 attempts" >&2
  exit 1
fi
8 | if kubectl -n default exec pod/ci-runner -- \ 9 | sh -c "/workspace/sidecar/bin/plnsvc_test.sh"; then 10 | 11 | # In case the user deleted the file early when they expected a failure 12 | if [ -e "$PWD/destroy-cluster.txt" ]; then 13 | # If the tests are successful, the cluster can be destroyed right away 14 | rm "$PWD/destroy-cluster.txt" 15 | fi 16 | else 17 | exit 1 18 | fi 19 | -------------------------------------------------------------------------------- /ci/images/ci-runner/hack/manifests/sidecar/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - pod.yaml 6 | -------------------------------------------------------------------------------- /ci/images/ci-runner/hack/manifests/sidecar/pod.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: ci-runner 6 | spec: 7 | containers: 8 | - name: ci-runner 9 | image: quay.io/redhat-pipeline-service/ci-runner:main 10 | imagePullPolicy: Always 11 | resources: 12 | requests: 13 | ephemeral-storage: "2Gi" 14 | limits: 15 | ephemeral-storage: "2Gi" 16 | command: 17 | - /bin/bash 18 | - -c 19 | - sleep 3600 20 | securityContext: 21 | privileged: true 22 | volumeMounts: 23 | - name: ci-runner-storage 24 | mountPath: "/workspace" 25 | workingDir: "/workspace/source" 26 | volumes: 27 | - name: ci-runner-storage 28 | emptyDir: {} 29 | -------------------------------------------------------------------------------- /ci/images/ci-runner/hack/sidecar/bin/plnsvc_setup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o nounset 4 | set -o pipefail 5 | set -x 6 | 7 | export KUBECONFIG="/kubeconfig" 8 | REPO_URL=$1 9 | REPO_REVISION=$2 10 | 11 | # to avoid the issue of "fatal: detected dubious 
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
set -x

echo "Start executing pipeline cases ..."
# Locate the directory that holds test.sh somewhere under the checkout and
# run the test suite against the sidecar's kubeconfig.
test_script_dir=$(find "$PWD" -type f -name test.sh -exec dirname {} +)
"$test_script_dir/test.sh" --kubeconfig "/kubeconfig" --debug
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
set -x

SCRIPT_DIR="$(
  cd "$(dirname "$0")" >/dev/null
  pwd
)"

PROJECT_DIR="$(
  cd "$SCRIPT_DIR/../../.." >/dev/null || exit 1
  pwd
)"

# shellcheck source=ci/images/ci-runner/hack/bin/utils.sh
source "$PROJECT_DIR/ci/images/ci-runner/hack/bin/utils.sh"

# Export the credentials the e2e tests require, fetching the secret values
# from the bitwarden server via the helpers sourced from utils.sh.
export_variables() {
    printf "Export variables\n" | indent 2
    # The following variables are exported by the Stonesoup CI:
    # DEFAULT_QUAY_ORG DEFAULT_QUAY_ORG_TOKEN GITHUB_USER GITHUB_TOKEN QUAY_TOKEN QUAY_OAUTH_USER QUAY_OAUTH_TOKEN QUAY_OAUTH_TOKEN_RELEASE_SOURCE QUAY_OAUTH_TOKEN_RELEASE_DESTINATION
    # GITHUB_ACCOUNTS_ARRAY PREVIOUS_RATE_REMAINING GITHUB_USERNAME_ARRAY GH_RATE_REMAINING

    export DEFAULT_QUAY_ORG=redhat-appstudio-qe
    printf "Fetch secrets from bitwarden server\n" | indent 2

    open_bitwarden_session
    get_default_quay_org_token
    get_github_user
    get_github_token
    get_quay_token
    get_quay_oauth_user
    get_quay_oauth_token
    get_quay_oauth_token_release_source
    get_quay_oauth_token_release_destination
    get_github_accounts
}

# Select the GitHub account (GITHUB_USER/GITHUB_TOKEN) with the highest
# remaining API rate limit among GITHUB_ACCOUNTS_ARRAY.
handle_ratelimit() {
    PREVIOUS_RATE_REMAINING=0

    # user stored: username:token,username:token
    for account in "${GITHUB_ACCOUNTS_ARRAY[@]}"
    do :
        IFS=':' read -r -a GITHUB_USERNAME_ARRAY <<< "$account"

        GH_RATE_REMAINING=$(curl -s \
        -H "Accept: application/vnd.github+json" \
        -H "Authorization: Bearer ${GITHUB_USERNAME_ARRAY[1]}" \
        https://api.github.com/rate_limit | jq ".rate.remaining")

        echo -e "[INFO ] user: ${GITHUB_USERNAME_ARRAY[0]} with rate limit remaining $GH_RATE_REMAINING"
        # Keep a running maximum: only advance the reference value when an
        # account is selected. The previous code updated the reference on
        # every iteration, so each account was compared against the one just
        # before it instead of the best so far — a later account with a lower
        # limit could override the account with the highest limit.
        if [[ "${GH_RATE_REMAINING}" -ge "${PREVIOUS_RATE_REMAINING}" ]];then
            GITHUB_USER="${GITHUB_USERNAME_ARRAY[0]}"
            GITHUB_TOKEN="${GITHUB_USERNAME_ARRAY[1]}"
            PREVIOUS_RATE_REMAINING="${GH_RATE_REMAINING}"
        fi
    done

    echo -e "[INFO] Start tests with user: ${GITHUB_USER}"
}

# Clone redhat-appstudio/e2e-tests, prepare the cluster, and run the e2e
# suite filtered to the "pipeline" ginkgo label.
run_test() {
    ##git config
    git config --global user.name "redhat-appstudio-qe-bot"
    git config --global user.email redhat-appstudio-qe-bot@redhat.com

    mkdir -p "${HOME}/creds"
    GIT_CREDS_PATH="${HOME}/creds/file"
    git config --global credential.helper "store --file ${GIT_CREDS_PATH}"
    echo "https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com" > "${GIT_CREDS_PATH}"

    cd "$(mktemp -d)"

    git clone --branch main "https://${GITHUB_TOKEN}@github.com/redhat-appstudio/e2e-tests.git" .
    ## Deploy StoneSoup
    make local/cluster/prepare

    # Launch partial StoneSoup e2e tests
    go mod tidy
    go mod vendor
    make build
    ./bin/e2e-appstudio --ginkgo.label-filter "pipeline" --ginkgo.vv
}

export_variables
handle_ratelimit
run_test
a script to tag images with the latest commit ID on quay.io" \ 13 | url="" \ 14 | vcs-ref= \ 15 | vcs-type="git" \ 16 | vendor="Pipeline Service" \ 17 | version="0.1" 18 | WORKDIR / 19 | RUN mkdir /workspace && chmod 777 /workspace && chown 65532:65532 /workspace 20 | ENV HOME /tmp/home 21 | RUN mkdir $HOME && chmod 777 $HOME && chown 65532:65532 $HOME 22 | RUN JQ_VERSION="1.6" && \ 23 | curl -sSL -o "/usr/local/bin/jq" "https://github.com/stedolan/jq/releases/download/jq-$JQ_VERSION/jq-linux64" && \ 24 | chmod 755 "/usr/local/bin/jq" 25 | COPY image-upload.sh /usr/local/bin/image-upload.sh 26 | RUN chmod 755 /usr/local/bin/image-upload.sh 27 | USER 65532:65532 28 | VOLUME /workspace 29 | WORKDIR /workspace 30 | ENTRYPOINT ["/usr/local/bin/image-upload.sh"] 31 | -------------------------------------------------------------------------------- /ci/images/static-checks/Dockerfile: -------------------------------------------------------------------------------- 1 | #@FROM registry.access.redhat.com/ubi9/ubi-minimal 2 | FROM registry.access.redhat.com/ubi9/ubi-minimal@sha256:a7d837b00520a32502ada85ae339e33510cdfdbc8d2ddf460cc838e12ec5fa5a 3 | LABEL build-date= \ 4 | com.redhat.build-host= \ 5 | description="This image provides the static checks binaries." \ 6 | distribution-scope="public" \ 7 | io.k8s.description="This image provides the static checks binaries." 
#!/usr/bin/env bash

# Copyright 2022 The Pipeline Service Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_DIR="$(
  cd "$(dirname "$0")" >/dev/null
  pwd
)"
PROJECT_DIR="$(
  cd "$SCRIPT_DIR/../.." >/dev/null || exit 1
  pwd
)"
export PROJECT_DIR

usage() {
  echo "
Usage:
    ${0##*/} [options]

Run the static checks on the project.

Optional arguments:
    -w, --workspace_dir WORKSPACE_DIR.
        Workspace directory.
        Default: $PROJECT_DIR
    -d, --debug
        Activate tracing/debug mode.
    -h, --help
        Display this message.

Example:
    ${0##*/}
" >&2
}

parse_args() {
  # Default to the project directory, as advertised by usage(). Without this
  # default, running the script without '-w' aborted in run_checks with an
  # unbound-variable error because of 'set -o nounset' (every sibling check
  # script already sets this default).
  WORKSPACE_DIR="$PROJECT_DIR"
  while [[ $# -gt 0 ]]; do
    case $1 in
    -w | --workspace_dir)
      shift
      WORKSPACE_DIR="$1"
      ;;
    -d | --debug)
      set -x
      DEBUG="--debug"
      export DEBUG
      ;;
    -h | --help)
      usage
      exit 0
      ;;
    *)
      echo "[ERROR] Unknown argument: $1" >&2
      usage
      exit 1
      ;;
    esac
    shift
  done
}

# Run every sibling check script (*.sh in this directory except all.sh)
# against the workspace; collect the names of failing checks in FAILED.
run_checks() {
  FAILED=()
  mapfile -t check_list < <(
    find "$SCRIPT_DIR" -name \*.sh -exec basename {} \; |
      grep -vE "^all.sh$" |
      sort
  )
  for check in "${check_list[@]}"; do
    # Strip the trailing ".sh" for display.
    check_name=${check::-3}
    echo "[$check_name]"
    if ! "$SCRIPT_DIR/$check" --workspace_dir "$WORKSPACE_DIR"; then
      FAILED+=("$check_name")
    fi
    echo
  done
}

main() {
  if [ -n "${DEBUG:-}" ]; then
    set -x
  fi
  parse_args "$@"
  run_checks
  if [ -n "${FAILED[*]}" ]; then
    echo "[ERROR] Test failed: ${FAILED[*]}" >&2
    exit 1
  fi
}

if [ "${BASH_SOURCE[0]}" == "$0" ]; then
  main "$@"
fi
#!/usr/bin/env bash

# Copyright 2022 The Pipeline Service Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_DIR="$(
  cd "$(dirname "$0")" >/dev/null
  pwd
)"
PROJECT_DIR="$(
  cd "$SCRIPT_DIR/../.." >/dev/null || exit 1
  pwd
)"
export PROJECT_DIR

usage() {
  echo "
Usage:
    ${0##*/} [options]

Validate grafana JSON dashboard definitions.

Optional arguments:
    -w, --workspace_dir WORKSPACE_DIR.
        Workspace directory.
        Default: $PROJECT_DIR
    -d, --debug
        Activate tracing/debug mode.
    -h, --help
        Display this message.

Example:
    ${0##*/} --workspace_dir \$PWD
" >&2
}

parse_args() {
  WORKSPACE_DIR="$PROJECT_DIR"
  while [[ $# -gt 0 ]]; do
    case $1 in
    -w | --workspace_dir)
      shift
      WORKSPACE_DIR="$1"
      ;;
    -d | --debug)
      set -x
      DEBUG="--debug"
      export DEBUG
      ;;
    -h | --help)
      usage
      exit 0
      ;;
    *)
      echo "[ERROR] Unknown argument: $1" >&2
      usage
      exit 1
      ;;
    esac
    shift
  done
  # The dashboards live at a fixed path below the workspace root.
  WORKSPACE_DIR="$WORKSPACE_DIR/operator/gitops/argocd/grafana/dashboards"
}

init() {
  jq --version
}

# Fail if the dashboard file named by $JSON (set by the caller's read loop)
# contains any element with a 'datasource' whose uid differs from the
# templated "${DS_PROMETHEUS-APPSTUDIO-DS}" value. The jq '..' walk visits
# every nested element, so this catches datasources at any depth.
check_datasource() {
  if [ "$(jq '.. | select(.datasource?) | select(.datasource.uid != "${DS_PROMETHEUS-APPSTUDIO-DS}")' "$JSON" | wc -l)" != "0" ]; then
    echo "FAIL"
    echo "[ERROR] 'datasource' element(s) found in '$JSON'"
    exit 1
  fi
}

run() {
  echo "Validating dashboards:"
  # NOTE: the while loop runs in a pipeline subshell, so the 'exit 1' inside
  # check_datasource exits that subshell; errexit + pipefail then abort the
  # whole script because the pipeline's status is non-zero.
  find "$WORKSPACE_DIR" -name \*.json | while read -r JSON; do
    echo -n " - $JSON: "
    check_datasource
    echo "OK"
  done
}

main() {
  if [ -n "${DEBUG:-}" ]; then
    set -x
  fi
  parse_args "$@"
  init
  run
}

if [ "${BASH_SOURCE[0]}" == "$0" ]; then
  main "$@"
fi
#!/usr/bin/env bash

# Copyright 2022 The Pipeline Service Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_DIR="$(
  cd "$(dirname "$0")" >/dev/null
  pwd
)"
PROJECT_DIR="$(
  cd "$SCRIPT_DIR/../.." >/dev/null || exit 1
  pwd
)"
export PROJECT_DIR

usage() {
  echo "
Usage:
    ${0##*/} [options]

Run hadolint in the content of the workspace directory

Optional arguments:
    -w, --workspace_dir WORKSPACE_DIR.
        Workspace directory.
        Default: $PROJECT_DIR
    -c, --config-file CONFIG_FILE
        Configuration file.
        Default: the path '../config/hadolint.yaml' from this script
    -d, --debug
        Activate tracing/debug mode.
    -h, --help
        Display this message.

Example:
    ${0##*/} --workspace_dir \$PWD
" >&2
}

parse_args() {
  # Defaults match what usage() advertises.
  WORKSPACE_DIR="$PROJECT_DIR"
  CONFIG_FILE="$SCRIPT_DIR/../config/hadolint.yaml"
  while [[ $# -gt 0 ]]; do
    case $1 in
    -w | --workspace_dir)
      shift
      WORKSPACE_DIR="$1"
      ;;
    -d | --debug)
      set -x
      DEBUG="--debug"
      export DEBUG
      ;;
    -h | --help)
      usage
      exit 0
      ;;
    -c | --config-file)
      shift
      CONFIG_FILE="$1"
      ;;
    *)
      echo "[ERROR] Unknown argument: $1" >&2
      usage
      exit 1
      ;;
    esac
    shift
  done
}

# Print the tool version so the CI log records which hadolint ran.
init() {
  hadolint --version
}

# Lint every Dockerfile under the workspace in a single hadolint invocation
# ('-exec ... +' batches the file arguments).
run() {
  find "$WORKSPACE_DIR" -name Dockerfile -exec \
    hadolint -c "$CONFIG_FILE" {} +
}

main() {
  if [ -n "${DEBUG:-}" ]; then
    set -x
  fi
  parse_args "$@"
  init
  run
}

if [ "${BASH_SOURCE[0]}" == "$0" ]; then
  main "$@"
fi
#!/usr/bin/env bash

# Copyright 2022 The Pipeline Service Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_DIR="$(
  cd "$(dirname "$0")" >/dev/null
  pwd
)"
PROJECT_DIR="$(
  cd "$SCRIPT_DIR/../.." >/dev/null || exit 1
  pwd
)"
export PROJECT_DIR

usage() {
  echo "
Usage:
    ${0##*/} [options]

Run shellcheck in the content of the workspace directory

Optional arguments:
    -w, --workspace_dir WORKSPACE_DIR.
        Workspace directory.
        Default: $PROJECT_DIR
    -d, --debug
        Activate tracing/debug mode.
    -h, --help
        Display this message.

Example:
    ${0##*/} --workspace_dir \$PWD
" >&2
}

parse_args() {
  # Default matches what usage() advertises.
  WORKSPACE_DIR="$PROJECT_DIR"
  while [[ $# -gt 0 ]]; do
    case $1 in
    -w | --workspace_dir)
      shift
      WORKSPACE_DIR="$1"
      ;;
    -d | --debug)
      set -x
      DEBUG="--debug"
      export DEBUG
      ;;
    -h | --help)
      usage
      exit 0
      ;;
    *)
      echo "[ERROR] Unknown argument: $1" >&2
      usage
      exit 1
      ;;
    esac
    shift
  done
}

# Print the tool version so the CI log records which shellcheck ran.
init() {
  shellcheck --version
}

# Lint every *.sh file under the workspace. NUL-delimited filenames handle
# paths with spaces; 'xargs -r' skips the run entirely when no file matches.
run() {
  cd "$WORKSPACE_DIR"
  find . -type f -name "*.sh" -print0 |
    xargs -r -0 shellcheck
}

main() {
  if [ -n "${DEBUG:-}" ]; then
    set -x
  fi
  parse_args "$@"
  init
  run
}

if [ "${BASH_SOURCE[0]}" == "$0" ]; then
  main "$@"
fi
#!/usr/bin/env bash

# Copyright 2022 The Pipeline Service Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_DIR="$(
  cd "$(dirname "$0")" >/dev/null
  pwd
)"
PROJECT_DIR="$(
  cd "$SCRIPT_DIR/../.." >/dev/null || exit 1
  pwd
)"
export PROJECT_DIR

usage() {
  echo "
Usage:
    ${0##*/} [options]

Run yamllint in the content of the workspace directory

Optional arguments:
    -w, --workspace_dir WORKSPACE_DIR.
        Workspace directory.
        Default: $PROJECT_DIR
    -c, --config-file CONFIG_FILE
        Configuration file.
        Default: the path '../config/yamllint.yaml' from this script
    -d, --debug
        Activate tracing/debug mode.
    -h, --help
        Display this message.

Example:
    ${0##*/} --workspace_dir \$PWD
" >&2
}

parse_args() {
  # Defaults match what usage() advertises.
  WORKSPACE_DIR="$PROJECT_DIR"
  CONFIG_FILE="$SCRIPT_DIR/../config/yamllint.yaml"
  while [[ $# -gt 0 ]]; do
    case $1 in
    -w | --workspace_dir)
      shift
      WORKSPACE_DIR="$1"
      ;;
    -d | --debug)
      set -x
      DEBUG="--debug"
      export DEBUG
      ;;
    -h | --help)
      usage
      exit 0
      ;;
    -c | --config-file)
      shift
      CONFIG_FILE="$1"
      ;;
    *)
      echo "[ERROR] Unknown argument: $1" >&2
      usage
      exit 1
      ;;
    esac
    shift
  done
}

# Print the tool version so the CI log records which yamllint ran.
init() {
  yamllint --version
}

# yamllint recurses into the directory on its own; no find needed.
run() {
  yamllint -c "$CONFIG_FILE" "$WORKSPACE_DIR"
}

main() {
  if [ -n "${DEBUG:-}" ]; then
    set -x
  fi
  parse_args "$@"
  init
  run
}

if [ "${BASH_SOURCE[0]}" == "$0" ]; then
  main "$@"
fi
by openshift-pipelines 1.10 or a fooled by our use of kustomize for image setting 27 | - CKV_K8S_14 # deployments referenced by checkov are either items like chains which will be replace by openshift-pipelines 1.10 or a fooled by our use of kustomize for image setting 28 | - CKV_K8S_15 # with sha specific image refs setting pull policy to always is redundant and negates us of openshift node cache 29 | # need to reivew chains/pac needs to read secrets in a couple of namespaces, not a clusterrolebinding, create webhooks 30 | - CKV2_K8S_5 31 | # there is no use of hostPID, hostIPC, hostNetwork in repo, but scan complains about not setting explicitly to false 32 | # will check in live tests 33 | - CKV_K8S_17 34 | - CKV_K8S_18 35 | - CKV_K8S_19 36 | # openshift scc / security addresses these check by mutating pod under the covers 37 | # with pods getting assigned the restricted scc unless explicitly allowed otherwise 38 | - CKV_K8S_20 # no allowPrivilegeEscalation 39 | - CKV_K8S_21 # the default namespace should not be used 40 | - CKV_K8S_22 # read only FS 41 | - CKV_K8S_23 # admission of root containers 42 | - CKV_K8S_25 # we are not adding capabilities, running under restricted-scc 43 | - CKV_K8S_28 # admission of NET RAW capability 44 | - CKV_K8S_29 # apply security context to pod and containers 45 | - CKV_K8S_30 # apply security context to containers 46 | - CKV_K8S_31 # runtime/default seccomp profile 47 | - CKV_K8S_33 # also, no kubernetes-dashboard on openshift 48 | - CKV_K8S_37 # any capabilities 49 | - CKV_K8S_38 # our pods almost always a) need to access api svr, b) do not have privileged SA 50 | - CKV_K8S_40 # high UID number 51 | - CKV_K8S_35 # opened https://github.com/tektoncd/results/issues/432 for secrets via env var 52 | # need to allow argocd to create/delete the validatingadmissionwebhooks for tekton (core part of knative) 53 | - CKV_K8S_155 54 | - CKV_K8S_157 55 | - CKV2_K8S_6 # use NetworkPolicy like what registration-service and integration-service 
employ are untenable for tekton controllers 56 | soft-fail: false 57 | skip-path: 58 | - developer 59 | - ci 60 | -------------------------------------------------------------------------------- /ci/images/static-checks/content/config/hadolint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Hadolint configuration file 3 | 4 | # configure ignore rules 5 | # see https://github.com/hadolint/hadolint#rules for a list of available rules. 6 | 7 | ignored: [] 8 | failure-threshold: info 9 | format: tty 10 | no-color: false 11 | no-fail: false 12 | trustedRegistries: 13 | - quay.io 14 | - registry.access.redhat.com 15 | -------------------------------------------------------------------------------- /ci/images/static-checks/content/config/shellcheck.rc: -------------------------------------------------------------------------------- 1 | # Each code should be disabled on its own line, and have a comment which 2 | # follow this template: 3 | # Error message: Error message returned by shellcheck/ 4 | # Reason for disabling: Reason why the error has been disabled. 5 | 6 | # Error message: See if you can use ${variable//search/replace} instead. 7 | # Reason for disabling: Cannot be consistenly enforced, as even the 8 | # documentation says 'Utilizing some of the more complex capabilities of 9 | # sed is required occasionally and it is safe to ignore SC2001.'. 
10 | disable=SC2001 11 | -------------------------------------------------------------------------------- /ci/images/static-checks/content/config/yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | yaml-files: 3 | - '*.yaml' 4 | - '*.yml' 5 | rules: 6 | braces: enable 7 | brackets: enable 8 | colons: enable 9 | commas: enable 10 | comments: 11 | require-starting-space: true 12 | ignore-shebangs: true 13 | min-spaces-from-content: 1 14 | comments-indentation: enable 15 | document-end: disable 16 | document-start: enable 17 | empty-lines: enable 18 | empty-values: disable 19 | float-values: disable 20 | hyphens: enable 21 | indentation: enable 22 | key-duplicates: enable 23 | key-ordering: disable 24 | line-length: disable 25 | new-line-at-end-of-file: enable 26 | new-lines: enable 27 | octal-values: disable 28 | quoted-strings: disable 29 | trailing-spaces: enable 30 | truthy: 31 | check-keys: false 32 | -------------------------------------------------------------------------------- /ci/images/vulnerability-scan/Dockerfile: -------------------------------------------------------------------------------- 1 | #@FROM registry.access.redhat.com/ubi9/ubi-minimal 2 | FROM registry.access.redhat.com/ubi9/ubi-minimal@sha256:a7d837b00520a32502ada85ae339e33510cdfdbc8d2ddf460cc838e12ec5fa5a 3 | LABEL build-date= \ 4 | com.redhat.build-host= \ 5 | description="This image provides a script to scan Clair for security vulnerabilities on container images via Quay." \ 6 | distribution-scope="public" \ 7 | io.k8s.description="This image provides a script to scan Clair for security vulnerabilities on container images via Quay." \ 8 | io.k8s.display-name="clair scan" \ 9 | maintainer="Pipeline Service" \ 10 | name="clair-scan" \ 11 | release="0.1" \ 12 | summary="Provides the latest results of clair security scan." 
\ 13 | url="https://github.com/openshift-pipelines/pipeline-service/tree/main/ci/images/vulnerability-scan" \ 14 | vcs-ref= \ 15 | vcs-type="git" \ 16 | vendor="Pipeline Service" \ 17 | version="0.1" 18 | WORKDIR / 19 | RUN mkdir /workspace && chmod 777 /workspace && chown 65532:65532 /workspace 20 | ENV HOME /tmp/home 21 | RUN mkdir $HOME && chmod 777 $HOME && chown 65532:65532 $HOME 22 | COPY ./scan.sh /usr/local/bin/scan.sh 23 | RUN chmod 755 /usr/local/bin/scan.sh 24 | USER 65532:65532 25 | VOLUME /workspace 26 | WORKDIR /workspace 27 | ENTRYPOINT ["/usr/local/bin/scan.sh"] 28 | -------------------------------------------------------------------------------- /ci/images/vulnerability-scan/common/utils.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2022 The Pipeline Service Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | set -o errexit 18 | set -o nounset 19 | 20 | get_digest() { 21 | local img="$1" 22 | quay_url=${quay_url:-} 23 | #number of retries=3 24 | for i in {1..3} 25 | do 26 | #Get the digest of the image; each HTTP header must be passed with its own -H flag 27 | digest_http_code=$(curl -s -w '%{http_code}' -o /tmp/digest.json -H "Accept: application/vnd.quay.distribution.manifest.v2+json" -H "Content-type: application/json" -H "Authorization: Bearer $AUTH_BEARER_TOKEN" "$quay_url"/"$img") 28 | 29 | if [[ "$digest_http_code" == "200" ]]; then 30 | digest=$(jq '.tags[] | select(.name == "main").manifest_digest ' /tmp/digest.json | tr -d '"') 31 | break 32 | else 33 | if [[ "$i" -lt 3 ]]; then 34 | printf "Unable to fetch the digest from Quay. Retrying...\n" 35 | sleep 30 36 | else 37 | printf "Error while fetching the digest from Quay. Status code: %s\n" "${digest_http_code}" 38 | fi 39 | fi 40 | done 41 | } 42 | 43 | get_vulnerabilities() { 44 | local img=$1 45 | #number of retries=3 46 | for i in {1..3} 47 | do 48 | #Scan for vulnerabilities; each HTTP header must be passed with its own -H flag 49 | results_http_code=$(curl -s -w '%{http_code}' -o /tmp/vulnerability.json -H "Content-type: application/json" -H "Authorization: Bearer $AUTH_BEARER_TOKEN" "$quay_url"/"$img"/manifest/"$digest"/security?vulnerabilities=true) 50 | 51 | if [[ "$results_http_code" == "200" ]]; then 52 | printf "\n *********************************** \n" 53 | printf " \t %s results " "$img" 54 | printf "\n *********************************** \n" 55 | 56 | #Filtering results with vulnerabilities only. 57 | results=$(jq -r '.data.Layer.Features[].Vulnerabilities[]' /tmp/vulnerability.json) 58 | 59 | if [[ -n $results ]]; then 60 | printf "%s" "$results" | jq 61 | printf "\n Check the full report here: https://quay.io/repository/redhat-pipeline-service/%s/manifest/%s?tab=vulnerabilities \n" "$img" "$digest" 62 | return 1 63 | else 64 | printf "No vulnerabilities found \n" 65 | fi 66 | return 67 | else 68 | if [[ "$i" -lt 3 ]]; then 69 | printf "Unable to fetch the vulnerability report from Quay. Retrying...\n"
70 | sleep 30 71 | else 72 | printf "Error while fetching the vulnerability report from Quay. Status code: %s\n" "${results_http_code}" 73 | fi 74 | fi 75 | done 76 | } -------------------------------------------------------------------------------- /ci/images/vulnerability-scan/scan-image.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2022 The Pipeline Service Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -o errexit 18 | set -o nounset 19 | 20 | SCRIPT_DIR="$( 21 | cd "$(dirname "$0")" >/dev/null 22 | pwd 23 | )" 24 | 25 | # shellcheck source=ci/images/vulnerability-scan/common/utils.sh 26 | source "$SCRIPT_DIR/common/utils.sh" 27 | 28 | VULNERABILITIES_EXIST=0 29 | 30 | main() { 31 | get_digest "$IMAGE_NAME" 32 | get_vulnerabilities "$IMAGE_NAME" || VULNERABILITIES_EXIST=$? 33 | printf "%s" "$VULNERABILITIES_EXIST" 34 | } 35 | 36 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 37 | main "$@" 38 | fi 39 | -------------------------------------------------------------------------------- /ci/images/vulnerability-scan/scan.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2022 The Pipeline Service Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -o errexit 18 | set -o nounset 19 | 20 | SCRIPT_DIR="$( 21 | cd "$(dirname "$0")" >/dev/null 22 | pwd 23 | )" 24 | 25 | # shellcheck source=ci/images/vulnerability-scan/common/utils.sh 26 | source "$SCRIPT_DIR/common/utils.sh" 27 | 28 | images_vul=("${images[@]/*/1}") 29 | 30 | main() { 31 | for i in "${!images[@]}"; do 32 | get_digest "${images[i]}" 33 | get_vulnerabilities "${images[i]}" || images_vul[i]=$? 34 | done 35 | printf "%s " "${images_vul[@]}" 36 | } 37 | 38 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 39 | main "$@" 40 | fi 41 | -------------------------------------------------------------------------------- /developer/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This YAML file allows openshift_dev_setup.sh to fetch values of certain variables. 3 | # Usage - to be used for overwriting defaults 4 | 5 | # cluster_type can only be "openshift" for now. 6 | cluster_type: openshift 7 | 8 | # git_url refers to a git repo to be considered as the source of truth for Argo CD applications. 9 | git_url: https://github.com/openshift-pipelines/pipeline-service.git 10 | 11 | # git_ref refers to the git repo's ref to be considered as the source of truth for Argo CD applications. 12 | git_ref: main 13 | 14 | # Applications to be deployed on the cluster 15 | apps: 16 | - openshift-gitops # openshift-gitops is a pre-requisite for Pipeline Service 17 | - pipeline-service # pipeline-service sets up Pipeline Service on the cluster. 
18 | 19 | # Tekton results database credentials 20 | tekton_results_db: 21 | user: 22 | password: 23 | 24 | # Minio S3 compatible storage credentials for tekton results 25 | tekton_results_s3: 26 | user: 27 | password: 28 | -------------------------------------------------------------------------------- /developer/docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation for Developers 2 | 3 | ## Development environment 4 | 5 | You can see the required dependencies in [DEPENDENCIES.md](../../DEPENDENCIES.md). 6 | 7 | You have various options to set up your environment: 8 | * local environment: the [install.sh](../images/devenv/install.sh) can be used to 9 | install all dependencies but be careful as it will impact your system configuration by 10 | installing/configuring packages locally. 11 | * running in a container: use [run.sh](../images/devenv/run.sh) to spawn a container 12 | and be dropped in a shell. 13 | * VS Code in a container: you can use the `Remote Development` extension 14 | (`ms-vscode-remote.vscode-remote-extensionpack`), and VS Code will use the content of 15 | [.devcontainer](../../.devcontainer) to spawn a container and drop you in the development 16 | environment. This will require an action on your side when opening the project, so look 17 | out for the `Reopen in container` notification. 18 | 19 | ## Testing changes 20 | 21 | Testing changes always involves committing those changes to a branch, and pushing those changes. 22 | This is because even in development mode, ArgoCD is used to control the resources deployed on the cluster. 23 | 24 | ### Using the locally checked out repository/branch 25 | This method is best when testing a current development as it does not require any modification to files tracked by git. 26 | 27 | * Run `developer/openshift/dev_setup.sh` with the `--use-current-branch` option.
28 | 29 | Note: If the current checked out workdir belongs to a branch, the branch will be deployed. 30 | If it is a detached HEAD, then the revision number will be used. 31 | In the latter case updating the deployment requires running the script again, whereas in the former just pushing a change to the branch will cause ArgoCD to trigger the synchronization. 32 | 33 | ### Using another repository/branch 34 | 35 | * Update the `git_url` and `git_ref` in `developer/config.yaml` to reference the fork and branch/revision. 36 | * Run `developer/openshift/dev_setup.sh`. 37 | 38 | When using this method, make sure that you do not commit and push changes to `developer/config.yaml`. 39 | 40 | ## PR process 41 | 42 | * When you open a PR, add a few reviewers. If the PR solves a GitHub issue, make sure 43 | that the contributor who opened the issue is one of the reviewers. 44 | * If you do not see any progress on the PR over a couple of days, feel free to put a 45 | comment tagging one or two users asking them to take the time to review your change. 46 | * The reviewer will perform the code review. If they are satisfied with the change, and 47 | they feel like the change does not require a second pair of eyes, they will merge the 48 | PR. 49 | * The PR may be large enough or important enough to require a second opinion. In that case, 50 | the 1st reviewer will put a comment asking for an additional review. In that case, the 51 | last reviewer to approve the PR is responsible for merging it. 52 | -------------------------------------------------------------------------------- /developer/docs/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Troubleshooting 2 | 3 | ## If we install yq via snap store on Fedora, it uses strict confinement policy which does not provide access to root (including /tmp).
4 | 5 | ``` 6 | $ yq e ".current-context" "/tmp/tmp.QNwtlzJzZh/credentials/kubeconfig/compute/compute.kubeconfig.base" 7 | Error: open /tmp/tmp.QNwtlzJzZh/credentials/kubeconfig/compute/compute.kubeconfig.base: no such file or directory 8 | ``` 9 | 10 | Make sure tools such as yq, jq or any other that is using a strict confinement policy is setup to have access to root filesystem. This could be done by installing these tools locally rather than through package managers. 11 | -------------------------------------------------------------------------------- /developer/hack/build-images-buildah.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o nounset 4 | set -o pipefail 5 | set -x 6 | 7 | SCRIPT_DIR="$( 8 | cd "$(dirname "$0")" >/dev/null 9 | pwd 10 | )" 11 | PROJECT_DIR="$( 12 | cd "$SCRIPT_DIR/../.." >/dev/null || exit 1 13 | pwd 14 | )" 15 | export PROJECT_DIR 16 | 17 | parse_args() { 18 | mapfile -t DEFAULT_IMAGE_DIRS < <( 19 | find "$PROJECT_DIR" -type f -name Dockerfile -exec dirname {} \; | 20 | sed "s:$PROJECT_DIR/::" | 21 | sort 22 | ) 23 | IMAGE_DIRS=() 24 | while [[ $# -gt 0 ]]; do 25 | case $1 in 26 | --delete) 27 | DELETE_IMAGE="1" 28 | ;; 29 | -i | --image) 30 | shift 31 | if [ ! -d "$1" ]; then 32 | echo "[ERROR] Directory does not exists: $1" >&2 33 | exit 1 34 | else 35 | if [ ! 
-e "$1/Dockerfile" ]; then 36 | echo "[ERROR] Dockerfile not found in '$1'" >&2 37 | exit 1 38 | fi 39 | fi 40 | IMAGE_DIRS+=("$1") 41 | ;; 42 | -t | --tag) 43 | shift 44 | TAG="$1" 45 | ;; 46 | -d | --debug) 47 | set -x 48 | DEBUG="--debug" 49 | export DEBUG 50 | ;; 51 | -h | --help) 52 | usage 53 | exit 0 54 | ;; 55 | *) 56 | echo "Unknown argument: $1" 57 | usage 58 | exit 1 59 | ;; 60 | esac 61 | shift 62 | done 63 | } 64 | 65 | init() { 66 | TAG=${TAG:-latest} 67 | if [ -z "${IMAGE_DIRS[*]}" ]; then 68 | IMAGE_DIRS=("${DEFAULT_IMAGE_DIRS[@]}") 69 | fi 70 | buildah="buildah --storage-driver=vfs" 71 | } 72 | 73 | build_image() { 74 | image_name=$(basename "$image_dir") 75 | case "$image_name" in 76 | quay-upload|vulnerability-scan) 77 | context="$image_dir" 78 | ;; 79 | *) 80 | context="$PROJECT_DIR" 81 | ;; 82 | esac 83 | 84 | $buildah build --format=oci \ 85 | --log-level debug \ 86 | --tls-verify=true --no-cache \ 87 | -f "$image_dir/Dockerfile" --tag "$image_name:$TAG" "$context" 88 | } 89 | 90 | delete_image() { 91 | image_name="localhost/$(basename "$image_dir")" 92 | if $buildah images "$image_name:$TAG"; then 93 | $buildah rmi "$image_name:$TAG" 94 | fi 95 | } 96 | 97 | main() { 98 | if [ -n "${DEBUG:-}" ]; then 99 | set -x 100 | fi 101 | parse_args "$@" 102 | init 103 | for image_dir in "${IMAGE_DIRS[@]}"; do 104 | echo "[$image_dir]" 105 | build_image 106 | if [ -n "${DELETE_IMAGE:-}" ]; then 107 | delete_image 108 | fi 109 | echo 110 | done 111 | } 112 | 113 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 114 | main "$@" 115 | fi 116 | -------------------------------------------------------------------------------- /developer/hack/ci.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2022 The Pipeline Service Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -o errexit 18 | set -o nounset 19 | set -o pipefail 20 | 21 | SCRIPT_DIR="$( 22 | cd "$(dirname "$0")" >/dev/null 23 | pwd 24 | )" 25 | PROJECT_DIR="$( 26 | cd "$SCRIPT_DIR/../.." >/dev/null || exit 1 27 | pwd 28 | )" 29 | export PROJECT_DIR 30 | 31 | usage() { 32 | echo " 33 | Usage: 34 | ${0##*/} [options] 35 | 36 | Run the CI on the project, using the images. 37 | 38 | Optional arguments: 39 | -d, --debug 40 | Activate tracing/debug mode. 41 | -h, --help 42 | Display this message. 43 | 44 | Example: 45 | ${0##*/} 46 | " >&2 47 | } 48 | 49 | parse_args() { 50 | while [[ $# -gt 0 ]]; do 51 | case $1 in 52 | -d | --debug) 53 | set -x 54 | DEBUG="--debug" 55 | export DEBUG 56 | ;; 57 | -h | --help) 58 | usage 59 | exit 0 60 | ;; 61 | *) 62 | echo "[ERROR] Unknown argument: $1" >&2 63 | usage 64 | exit 1 65 | ;; 66 | esac 67 | shift 68 | done 69 | } 70 | 71 | run_ci() { 72 | image_dir="ci/images/static-checks" 73 | "$SCRIPT_DIR/run_image.sh" --quiet "$image_dir" 74 | } 75 | 76 | main() { 77 | if [ -n "${DEBUG:-}" ]; then 78 | set -x 79 | fi 80 | parse_args "$@" 81 | run_ci 82 | } 83 | 84 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 85 | main "$@" 86 | fi 87 | -------------------------------------------------------------------------------- /developer/images/dependencies-update/Dockerfile: -------------------------------------------------------------------------------- 1 | #@FROM registry.access.redhat.com/ubi9/ubi-minimal 2 | FROM 
registry.access.redhat.com/ubi9/ubi-minimal@sha256:a7d837b00520a32502ada85ae339e33510cdfdbc8d2ddf460cc838e12ec5fa5a 3 | LABEL build-date= \ 4 | com.redhat.build-host= \ 5 | description="This image provides the scripts to update the repository dependencies." \ 6 | distribution-scope="public" \ 7 | io.k8s.description="This image provides the scripts to update the repository dependencies." \ 8 | io.k8s.display-name="dependencies-update" \ 9 | maintainer="Pipeline Service" \ 10 | name="dependencies-update" \ 11 | release="0.1" \ 12 | summary="Provides the scripts to update the repository dependencies" \ 13 | url="https://github.com/openshift-pipelines/pipeline-service/tree/main/developer/images/dependencies-update" \ 14 | vcs-ref= \ 15 | vcs-type="git" \ 16 | vendor="Pipeline Service" \ 17 | version="0.1" 18 | WORKDIR / 19 | RUN mkdir /workspace && chmod 777 /workspace && chown 65532:65532 /workspace 20 | ENV HOME /tmp/home 21 | RUN mkdir $HOME && chmod 777 $HOME && chown 65532:65532 $HOME 22 | RUN microdnf install -y \ 23 | findutils-1:4.8.0 \ 24 | git-2.43.5 \ 25 | skopeo-2:1.14.3 \ 26 | && microdnf clean all 27 | 28 | COPY shared /tmp/image-build/shared 29 | RUN /tmp/image-build/shared/hack/install.sh --debug --bin jq \ 30 | && rm -rf /tmp/image-build 31 | 32 | COPY developer/images/dependencies-update/hack /opt/dependencies-update 33 | RUN chmod 755 /opt/dependencies-update/bin/*.sh 34 | USER 65532:65532 35 | ENV WORK_DIR /workspace 36 | VOLUME /workspace 37 | WORKDIR /workspace 38 | -------------------------------------------------------------------------------- /developer/images/dependencies-update/hack/bin/is-pr-skipped.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o nounset 4 | set -o pipefail 5 | 6 | usage() { 7 | echo " 8 | Usage: 9 | ${0##*/} [options] 10 | 11 | Check if a PR is required to submit commited changes. 
12 | The environment variable GITHUB_TOKEN must be set with a token that 13 | allows the PRs in the repository to be read. 14 | 15 | Mandatory arguments: 16 | -b, --target_branch 17 | Branch against which the PR is opened. 18 | -n, --name 19 | Repository name. 20 | -o, --owner 21 | Repository owner. 22 | 23 | Optional arguments: 24 | -r, --result 25 | File in which to write the result. 26 | Default: /dev/stdout 27 | -d, --debug 28 | Activate tracing/debug mode. 29 | -h, --help 30 | Display this message. 31 | 32 | Example: 33 | ${0##*/} --owner openshift-pipelines --name pipeline-service --target_branch main 34 | " >&2 35 | } 36 | 37 | parse_args() { 38 | RESULT="/dev/stdout" 39 | while [[ $# -gt 0 ]]; do 40 | case $1 in 41 | -n | --name) 42 | shift 43 | REPO_NAME="$1" 44 | ;; 45 | -o | --owner) 46 | shift 47 | REPO_OWNER="$1" 48 | ;; 49 | -b | --target_branch) 50 | shift 51 | REPO_TARGET_BRANCH="$1" 52 | ;; 53 | -r | --result) 54 | shift 55 | RESULT="$1" 56 | ;; 57 | -d | --debug) 58 | set -x 59 | ;; 60 | -h | --help) 61 | usage 62 | exit 0 63 | ;; 64 | *) 65 | echo "Unknown argument: $1" 66 | usage 67 | exit 1 68 | ;; 69 | esac 70 | shift 71 | done 72 | } 73 | 74 | prechecks() { 75 | if [[ -z "${REPO_NAME:-}" ]]; then 76 | printf "\n[ERROR] Missing parameter --name" >&2 77 | exit 1 78 | fi 79 | if [[ -z "${REPO_OWNER:-}" ]]; then 80 | printf "\n[ERROR] Missing parameter --owner" >&2 81 | exit 1 82 | fi 83 | if [[ -z "${REPO_TARGET_BRANCH:-}" ]]; then 84 | printf "\n[ERROR] Missing parameter --target_branch" >&2 85 | exit 1 86 | fi 87 | } 88 | 89 | run() { 90 | prechecks 91 | git config --global --add safe.directory "$PWD" 92 | if git diff --quiet "$REPO_TARGET_BRANCH"; then 93 | # No diff 94 | printf "yes" >"$RESULT" 95 | exit 96 | fi 97 | 98 | IS_PR_CREATED=$( 99 | curl \ 100 | --data-urlencode "head=$REPO_OWNER:$REPO_TARGET_BRANCH" \ 101 | --fail \ 102 | --get \ 103 | --header "Accept: application/vnd.github+json" \ 104 | --header "Authorization: Bearer
$GITHUB_TOKEN" \ 105 | --silent \ 106 | "https://api.github.com/repos/$REPO_OWNER/$REPO_NAME/pulls" | 107 | jq ". | length" 108 | ) 109 | if [ "$IS_PR_CREATED" = "0" ]; then 110 | printf "no" 111 | else 112 | # PR already opened 113 | printf "yes" 114 | fi >"$RESULT" 115 | } 116 | 117 | main() { 118 | parse_args "$@" 119 | run 120 | } 121 | 122 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 123 | main "$@" 124 | fi 125 | -------------------------------------------------------------------------------- /developer/images/dependencies-update/hack/bin/setup-local-repository.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o nounset 4 | set -o pipefail 5 | 6 | usage() { 7 | echo " 8 | Usage: 9 | ${0##*/} [options] 10 | 11 | Setup the CI git environment. 12 | 13 | Optional arguments: 14 | -b, --branch 15 | Branch to checkout. 16 | Default: main. 17 | -d, --debug 18 | Activate tracing/debug mode. 19 | -h, --help 20 | Display this message. 
21 | 22 | Example: 23 | ${0##*/} 24 | " >&2 25 | } 26 | 27 | parse_args() { 28 | REPO_SOURCE_BRANCH="main" 29 | while [[ $# -gt 0 ]]; do 30 | case $1 in 31 | -b | --branch) 32 | shift 33 | REPO_SOURCE_BRANCH="$1" 34 | ;; 35 | -d | --debug) 36 | set -x 37 | ;; 38 | -h | --help) 39 | usage 40 | exit 0 41 | ;; 42 | *) 43 | echo "Unknown argument: $1" 44 | usage 45 | exit 1 46 | ;; 47 | esac 48 | shift 49 | done 50 | } 51 | 52 | run() { 53 | # Setup user 54 | git config --global --add safe.directory "$PWD" 55 | git config --local user.email "pipeline-service@example.com" 56 | git config --local user.name "Pipeline Service CI Robot" 57 | 58 | # Use SSH authentication 59 | git config --replace-all remote.origin.url "$( 60 | git config --get remote.origin.url | 61 | sed -e "s|^https\?://github.com/|git@github.com:|" -e "s|\(\.git\)\?$|.git|" 62 | )" 63 | 64 | # Set the branch 65 | git checkout -b "$REPO_SOURCE_BRANCH" 66 | } 67 | 68 | main() { 69 | parse_args "$@" 70 | run 71 | } 72 | 73 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 74 | main "$@" 75 | fi 76 | -------------------------------------------------------------------------------- /developer/images/dependencies-update/hack/bin/task.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2022 The Pipeline Service Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | task_init() { 18 | if [ -z "${COMMIT_MSG:-}" ]; then 19 | echo "[ERROR] Unset variable: COMMIT_MSG" >&2 20 | exit 1 21 | elif [ -e "$COMMIT_MSG" ]; then 22 | rm -f "$COMMIT_MSG" 23 | fi 24 | if [ -z "${WORKSPACE_DIR:-}" ]; then 25 | echo "[ERROR] Unset variable: WORKSPACE_DIR" >&2 26 | exit 1 27 | fi 28 | } 29 | 30 | task_end() { 31 | # Commit_changes 32 | if ! git diff --quiet; then 33 | git add . 34 | git commit --file="$COMMIT_MSG" --quiet --signoff 35 | fi 36 | } 37 | 38 | main() { 39 | if [ -n "${DEBUG:-}" ]; then 40 | set -x 41 | fi 42 | task_init 43 | run_task 44 | task_end 45 | echo "Done" 46 | } 47 | -------------------------------------------------------------------------------- /developer/images/dependencies-update/hack/bin/tasks/update_dockerfiles_base_images_sha.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2022 The Pipeline Service Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | set -o errexit 18 | set -o nounset 19 | set -o pipefail 20 | 21 | SCRIPT_DIR="$( 22 | cd "$(dirname "$0")" >/dev/null 23 | pwd 24 | )" 25 | # shellcheck source=developer/images/dependencies-update/hack/bin/task.sh 26 | source "$SCRIPT_DIR/../task.sh" 27 | 28 | run_task() { 29 | echo "Update base images SHA in Dockerfiles" >"$COMMIT_MSG" 30 | echo >>"$COMMIT_MSG" 31 | process_dockerfiles 32 | } 33 | 34 | process_dockerfiles() { 35 | mapfile -t DOCKERFILES < <( 36 | find "$WORKSPACE_DIR" -type f -name Dockerfile | 37 | sed "s:$WORKSPACE_DIR/::" | 38 | grep --invert-match --extended-regexp "developer/exploration" | 39 | sort 40 | ) 41 | while read -r from_base_image; do 42 | process_base_image_cmd 43 | done < <(grep --no-filename --regexp "^#@FROM " "${DOCKERFILES[@]}" | sort -u) 44 | } 45 | 46 | process_base_image_cmd() { 47 | base_image_name=$(echo "$from_base_image" | sed 's:^.* ::') 48 | echo -n " - $base_image_name" 49 | get_base_image_sha 50 | echo -n "@$base_image_sha : " 51 | update_base_image_sha 52 | if git diff --quiet; then 53 | echo "No update" 54 | else 55 | echo "Updated" 56 | echo "- Update base image SHA for '$base_image_name' to '$base_image_sha'" >>"$COMMIT_MSG" 57 | fi 58 | } 59 | 60 | get_base_image_sha() { 61 | base_image_sha=$(skopeo inspect "docker://$base_image_name" --format="{{.Digest}}") 62 | } 63 | 64 | update_base_image_sha() { 65 | sed -i "s|^FROM *$base_image_name@[^ ]*|FROM $base_image_name@$base_image_sha|" "${DOCKERFILES[@]}" 66 | } 67 | 68 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 69 | main "$@" 70 | fi 71 | -------------------------------------------------------------------------------- /developer/images/dependencies-update/hack/bin/update.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2022 The Pipeline Service Authors. 
4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -o errexit 18 | set -o nounset 19 | set -o pipefail 20 | 21 | SCRIPT_DIR="$( 22 | cd "$(dirname "$0")" >/dev/null 23 | pwd 24 | )" 25 | PROJECT_DIR="$( 26 | cd "$SCRIPT_DIR/../../../../.." >/dev/null || exit 1 27 | pwd 28 | )" 29 | export PROJECT_DIR 30 | 31 | usage() { 32 | echo " 33 | Usage: 34 | ${0##*/} [options] 35 | 36 | Upgrade Pipeline Service dependencies. 37 | 38 | Optional arguments: 39 | -t, --task TASKNAME 40 | Only run the selected task. Can be repeated to run multiple tasks. 41 | TASKNAME must be in [$(echo "${DEFAULT_TASKS[@]}" | sed 's: :, :')]. 42 | Default: None 43 | -w, --workspace_dir WORKSPACE_DIR. 44 | Workspace directory. 45 | Default: $PROJECT_DIR 46 | -d, --debug 47 | Activate tracing/debug mode. 48 | -h, --help 49 | Display this message. 
50 | 51 | Example: 52 | ${0##*/} --task update_dockerfiles_base_images_sha 53 | " >&2 54 | } 55 | 56 | parse_args() { 57 | mapfile -t DEFAULT_TASKS < <(find "$SCRIPT_DIR/tasks" -type f -name \*.sh -exec basename {} \; | sort | sed 's:...$::') 58 | TASKS=() 59 | WORKSPACE_DIR="$PROJECT_DIR" 60 | while [[ $# -gt 0 ]]; do 61 | case $1 in 62 | -t | --task) 63 | shift 64 | TASKS+=("$1") 65 | ;; 66 | -w | --workspace_dir) 67 | shift 68 | WORKSPACE_DIR="$1" 69 | ;; 70 | -d | --debug) 71 | set -x 72 | DEBUG="--debug" 73 | export DEBUG 74 | ;; 75 | -h | --help) 76 | usage 77 | exit 0 78 | ;; 79 | *) 80 | echo "Unknown argument: $1" 81 | usage 82 | exit 1 83 | ;; 84 | esac 85 | shift 86 | done 87 | } 88 | 89 | init() { 90 | if [ -z "${TASKS[*]}" ]; then 91 | TASKS=( "${DEFAULT_TASKS[@]}" ) 92 | fi 93 | COMMIT_MSG="${TMPDIR:-/tmp}/update_commit_msg.txt" 94 | export COMMIT_MSG 95 | export WORKSPACE_DIR 96 | cd "$WORKSPACE_DIR" 97 | git config --global --add safe.directory "$PWD" 98 | } 99 | 100 | prepare_branch(){ 101 | # Revert any change to the current branch 102 | if ! git diff --quiet; then 103 | git stash --include-untracked 104 | GIT_STASH="true" 105 | fi 106 | GIT_STASH=${GIT_STASH:-} 107 | START_COMMIT=$(git rev-parse HEAD) 108 | } 109 | 110 | show_summary(){ 111 | local updated="false" 112 | if ! 
git diff --quiet "$START_COMMIT"..HEAD; then 113 | updated="true" 114 | fi 115 | 116 | echo 117 | echo "[Summary]" 118 | if [ "$updated" = "true" ]; then 119 | git log --format="%B" "$START_COMMIT..HEAD" 120 | else 121 | echo "No updates" 122 | fi 123 | } 124 | 125 | revert_branch(){ 126 | echo 127 | git clean --force -x 128 | if [ -n "$GIT_STASH" ]; then 129 | git stash pop 130 | fi 131 | } 132 | 133 | main() { 134 | if [ -n "${DEBUG:-}" ]; then 135 | set -x 136 | fi 137 | parse_args "$@" 138 | init 139 | 140 | prepare_branch 141 | for task_name in "${TASKS[@]}"; do 142 | echo 143 | echo "[$task_name]" 144 | "$SCRIPT_DIR/tasks/$task_name.sh" 145 | done 146 | show_summary 147 | revert_branch 148 | } 149 | 150 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 151 | main "$@" 152 | fi 153 | -------------------------------------------------------------------------------- /developer/openshift/README.md: -------------------------------------------------------------------------------- 1 | # Pipelines Service in Openshift 2 | 3 | ## Description 4 | 5 | This script essentially does this : 6 | 7 | 1. Install OpenShift GitOps on the cluster. 8 | 1. Deploy Pipelines Service on the cluster via an ArgoCD application. 9 | 10 | ## Dependencies 11 | 12 | Before installing the prerequisites, refer [DEPENDENCIES.md](../../DEPENDENCIES.md) to verify the versions of products, operators and tools used in Pipeline Service. 13 | 14 | ### Pre-requisites 15 | 16 | Before you execute the script, you need: 17 | 18 | 1. to have an OpenShift cluster with at least 6 CPU cores and 16GB RAM. 19 | 1. to install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/) 20 | 1. to install [oc](https://docs.openshift.com/container-platform/4.11/cli_reference/openshift_cli/getting-started-cli.html) 21 | 1. to install [argocd CLI](https://argo-cd.readthedocs.io/en/stable/cli_installation/) 22 | 1. 
to install [yq](https://mikefarah.gitbook.io/yq/#install) 23 | 24 | You can run the `dev_setup.sh` script with or without parameters. 25 | The `--use-current-branch` parameter should be used when testing manifests changes. 26 | 27 | The [test.sh](../../operator/test/test.sh) script runs certain examples from tektoncd repo for pipelines. You can run the below script only after `dev_setup.sh` is run and the required resources are up and running. 28 | 29 | ```bash 30 | ./test.sh --test pipelines 31 | # Runs a minimal PipelineRun 32 | # Checks that the pipelinerun is successful. 33 | 34 | ./test.sh --test chains 35 | # Simulates the creation of an image 36 | # Checks that the pipeline and image are signed. 37 | # Checks that the key to decode the signed data is available to all users. 38 | 39 | ./test.sh --test results 40 | # Checks that logs are uploaded by tekton-results. 41 | ``` 42 | 43 | ### Development - Onboarding a new component 44 | 45 | This developer environment can be used to develop/test a new component on the Pipeline Service by changing parameters in [config.yaml](./config.yaml). 46 | Considerations for testing a new component: 47 | 1. We are deploying various applications using the GitOps approach and hence a user would need to change the values of `git_url` and `git_ref` to reflect their own Git repository. 48 | 2. A user can modify the applications to be deployed on the cluster by modifying the [apps field](./config.yaml). 49 | 3. Onboarding a new component requires creating a new Argo CD application in [argo-apps](../../operator/gitops/argocd/argo-apps/) and adding it to [kustomization.yaml](../../operator/gitops/argocd/argo-apps/kustomization.yaml). 50 | 4. For testing, users need to modify only the git source path and ref of their Argo CD application to reflect their own Git repo. 
51 | 52 | ### Reset 53 | 54 | One can reset their environment and all the resources deployed by dev scripts: 55 | 56 | ```bash 57 | developer/openshift/reset.sh --work-dir /path/to/my_dir 58 | ``` 59 | -------------------------------------------------------------------------------- /developer/openshift/apps/openshift-gitops.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o nounset 4 | set -o pipefail 5 | 6 | install() { # install the GitOps operator, wait for Argo CD, log in and register the host cluster 7 | local app="openshift-gitops" # 'local' so the name does not leak as a global 8 | local ns="$app" 9 | 10 | ############################################################################# 11 | # Install the gitops operator 12 | ############################################################################# 13 | echo -n "- OpenShift-GitOps: " 14 | kubectl apply -k "$DEV_DIR/operators/$app" >/dev/null 15 | echo "OK" 16 | 17 | # Subscription information for potential debug 18 | mkdir -p "$WORK_DIR/logs/$app" 19 | kubectl get subscriptions "$app"-operator -n openshift-operators -o yaml >"$WORK_DIR/logs/$app/subscription.yaml" 20 | 21 | ############################################################################# 22 | # Wait for the URL to be available 23 | ############################################################################# 24 | echo -n "- Argo CD dashboard: " 25 | test_cmd="kubectl get route/openshift-gitops-server --ignore-not-found -n $ns -o jsonpath={.spec.host}" # kept as a string so the lookup can be re-run in the wait loop 26 | argocd_hostname="$(${test_cmd})" 27 | until curl --fail --insecure --output /dev/null --silent "https://$argocd_hostname"; do 28 | echo -n "." 
29 | sleep 2 30 | argocd_hostname="$(${test_cmd})" 31 | done 32 | echo "OK" 33 | echo "- Argo CD URL: https://$argocd_hostname" 34 | 35 | ############################################################################# 36 | # Post install 37 | ############################################################################# 38 | # Log into Argo CD 39 | echo -n "- Argo CD Login: " 40 | local argocd_password 41 | argocd_password="$(kubectl get secret openshift-gitops-cluster -n "$ns" -o jsonpath="{.data.admin\.password}" | base64 --decode)" 42 | argocd login "$argocd_hostname" --grpc-web --insecure --username admin --password "$argocd_password" >/dev/null 43 | echo "OK" 44 | 45 | # Register the host cluster as pipeline-cluster 46 | local cluster_name="plnsvc" 47 | if ! argocd cluster get "$cluster_name" >/dev/null 2>&1; then 48 | echo "- Register host cluster to ArgoCD as '$cluster_name': " 49 | argocd cluster add "$(yq e ".current-context" <"$KUBECONFIG")" --name="$cluster_name" --upsert --yes >/dev/null 50 | echo " OK" 51 | else 52 | echo "- Register host cluster to ArgoCD as '$cluster_name': OK" 53 | fi 54 | } 55 | 56 | main() { 57 | if [ -n "${DEBUG:-}" ]; then 58 | set -x 59 | fi 60 | install 61 | } 62 | 63 | if [ "${BASH_SOURCE[0]}" == "$0" ]; then 64 | main "$@" 65 | fi 66 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - pipeline-service.yaml 7 | - pipeline-service-storage.yaml 8 | - pipeline-service-o11y.yaml 9 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-o11y.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: 
Application 4 | metadata: 5 | name: pipeline-service-o11y 6 | namespace: openshift-gitops 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | destination: 11 | namespace: openshift-gitops 12 | server: https://kubernetes.default.svc 13 | source: 14 | path: developer/openshift/gitops/argocd/pipeline-service-o11y 15 | repoURL: https://github.com/openshift-pipelines/pipeline-service.git 16 | targetRevision: main 17 | project: default 18 | syncPolicy: 19 | # Comment this out if you want to manually trigger deployments (using the 20 | # Argo CD Web UI or Argo CD CLI), rather than automatically deploying on 21 | # every new Git commit to your directory. 22 | automated: 23 | prune: true 24 | selfHeal: true 25 | syncOptions: 26 | - CreateNamespace=true 27 | retry: 28 | limit: -1 # number of failed sync attempt retries; unlimited number of attempts if less than 0 29 | backoff: 30 | duration: 10s # the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h") 31 | factor: 2 # a factor to multiply the base duration after each failed retry 32 | maxDuration: 3m # the maximum amount of time allowed for the backoff strategy 33 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-o11y/README.md: -------------------------------------------------------------------------------- 1 | # Pipeline Service Observability 2 | 3 | This deploys observability (o11y) components used to configure Prometheus and 4 | Grafana. It utilizes the o11y stack from redhat-appstudio, with modifications 5 | to only deploy what is needed by the Pipeline Service for GitOps-based 6 | development. 7 | 8 | ## GitOps Inner Loop Development 9 | 10 | The `dev_setup.sh` configures Prometheus and Grafana to deploy the Pipeline 11 | Service dashboards. Grafana's UI can be accessed at the 12 | `grafana-access-appstudio-grafana.apps...` route on your cluster. 
13 | 14 | To iterate on your dashboard (deploying with `--use-current-branch`): 15 | 16 | 1. Navigate to the Pipeline Service dashboard in Grafana. 17 | 2. Add the panels and/or rows for your metrics as desired. 18 | 3. Click the "Share" icon to save the dashboard to JSON. Use the "Export" tab. Be sure to check "Export for sharing externally"; otherwise, the `datasource` references will have the `uid` entries specific to your cluster, which are not portable to staging or prod long term. NOTE: pipeline-service PR checks should prevent you from checking in dashboard JSON with those refs. 19 | 4. Copy the JSON into the pipeline-service-dashboard.json file, located in 20 | `operator/gitops/argocd/grafana/dashboards`. 21 | 5. Commit the updated JSON, push to your branch, and verify the dashboard is 22 | updated once ArgoCD syncs your repository. 23 | 24 | ## Components 25 | 26 | ### appstudio-prometheus 27 | 28 | This configures the Red Hat 29 | [Observability Operator](https://github.com/rhobs/observability-operator) using 30 | the same manifests used by App Studio. The operator consolidates the "cluster 31 | monitoring" and "user workload" monitoring stacks, allowing metrics to be combined 32 | in a single data source view. 33 | 34 | ### appstudio-grafana 35 | 36 | This configures the [Grafana Operator](https://github.com/grafana-operator/grafana-operator) 37 | using the same manifests as App Studio. The operator deploys and manages 38 | Grafana instances, and lets dashboards be configured with custom resources and 39 | JSON stored in ConfigMaps. The deployment includes the Pipeline Service 40 | dashboard, which is referenced in-tree. 
41 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-o11y/appstudio-grafana/allow-argocd-to-manage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: openshift-gitops-apply-grafana 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | - apiGroups: 10 | - grafana.integreatly.org 11 | resources: 12 | - grafanas 13 | - grafanadashboards 14 | - grafanadatasources 15 | verbs: 16 | - get 17 | - list 18 | - create 19 | - update 20 | - patch 21 | - delete 22 | --- 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | kind: ClusterRoleBinding 25 | metadata: 26 | name: openshift-gitops-apply-grafana 27 | annotations: 28 | argocd.argoproj.io/sync-wave: "0" 29 | roleRef: 30 | apiGroup: rbac.authorization.k8s.io 31 | kind: ClusterRole 32 | name: openshift-gitops-apply-grafana 33 | subjects: 34 | - kind: ServiceAccount 35 | name: openshift-gitops-argocd-application-controller 36 | namespace: openshift-gitops 37 | --- 38 | apiVersion: rbac.authorization.k8s.io/v1 39 | kind: Role 40 | metadata: 41 | name: openshift-gitops-manage-grafana 42 | namespace: appstudio-grafana 43 | annotations: 44 | argocd.argoproj.io/sync-wave: "0" 45 | rules: 46 | - apiGroups: 47 | - "" 48 | resources: 49 | - secrets 50 | - configmaps 51 | - serviceaccounts 52 | verbs: 53 | - get 54 | - list 55 | - create 56 | - update 57 | - patch 58 | - delete 59 | - apiGroups: 60 | - route.openshift.io 61 | resources: 62 | - routes 63 | verbs: 64 | - get 65 | - list 66 | - create 67 | - update 68 | - patch 69 | - delete 70 | --- 71 | apiVersion: rbac.authorization.k8s.io/v1 72 | kind: RoleBinding 73 | metadata: 74 | name: openshift-gitops-manage-grafana 75 | namespace: appstudio-grafana 76 | annotations: 77 | argocd.argoproj.io/sync-wave: "0" 78 | roleRef: 79 | apiGroup: 
rbac.authorization.k8s.io 80 | kind: Role 81 | name: openshift-gitops-manage-grafana 82 | subjects: 83 | - kind: ServiceAccount 84 | name: openshift-gitops-argocd-application-controller 85 | namespace: openshift-gitops 86 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-o11y/appstudio-grafana/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - allow-argocd-to-manage.yaml 6 | - namespace.yaml 7 | - https://raw.githubusercontent.com/redhat-appstudio/infra-deployments/main/components/monitoring/grafana/base/grafana-operator.yaml 8 | - https://raw.githubusercontent.com/redhat-appstudio/infra-deployments/main/components/monitoring/grafana/base/grafana-app.yaml 9 | - ../../../../../../operator/gitops/argocd/grafana 10 | commonAnnotations: 11 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 12 | namespace: appstudio-grafana 13 | configurations: 14 | - https://raw.githubusercontent.com/redhat-appstudio/infra-deployments/main/components/monitoring/grafana/base/dashboards/cm-dashboard.yaml 15 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-o11y/appstudio-grafana/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: appstudio-grafana 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | spec: {} 9 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-o11y/appstudio-prometheus/allow-argocd-to-manage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | 
kind: ClusterRole 4 | metadata: 5 | name: openshift-gitops-apply-prometheus 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | - apiGroups: 10 | - monitoring.rhobs 11 | resources: 12 | - monitoringstacks 13 | - servicemonitors 14 | verbs: 15 | - get 16 | - list 17 | - patch 18 | - create 19 | - update 20 | - delete 21 | - apiGroups: 22 | - monitoring.coreos.com 23 | resources: 24 | - prometheusrules 25 | - servicemonitors 26 | verbs: 27 | - get 28 | - list 29 | - patch 30 | - create 31 | - update 32 | - delete 33 | --- 34 | apiVersion: rbac.authorization.k8s.io/v1 35 | kind: ClusterRoleBinding 36 | metadata: 37 | name: openshift-gitops-apply-prometheus 38 | annotations: 39 | argocd.argoproj.io/sync-wave: "0" 40 | roleRef: 41 | apiGroup: rbac.authorization.k8s.io 42 | kind: ClusterRole 43 | name: openshift-gitops-apply-prometheus 44 | subjects: 45 | - kind: ServiceAccount 46 | name: openshift-gitops-argocd-application-controller 47 | namespace: openshift-gitops 48 | --- 49 | apiVersion: rbac.authorization.k8s.io/v1 50 | kind: Role 51 | metadata: 52 | name: openshift-gitops-manage-secrets 53 | namespace: dummy-service 54 | annotations: 55 | argocd.argoproj.io/sync-wave: "0" 56 | rules: 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - secrets 61 | verbs: 62 | - get 63 | - list 64 | - create 65 | - update 66 | - patch 67 | - delete 68 | --- 69 | apiVersion: rbac.authorization.k8s.io/v1 70 | kind: RoleBinding 71 | metadata: 72 | name: openshift-gitops-manage-secrets 73 | namespace: dummy-service 74 | annotations: 75 | argocd.argoproj.io/sync-wave: "0" 76 | roleRef: 77 | apiGroup: rbac.authorization.k8s.io 78 | kind: Role 79 | name: openshift-gitops-manage-secrets 80 | subjects: 81 | - kind: ServiceAccount 82 | name: openshift-gitops-argocd-application-controller 83 | namespace: openshift-gitops 84 | -------------------------------------------------------------------------------- 
/developer/openshift/gitops/argocd/pipeline-service-o11y/appstudio-prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - allow-argocd-to-manage.yaml 6 | - https://github.com/redhat-appstudio/infra-deployments/components/monitoring/prometheus/development?ref=main 7 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-o11y/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - appstudio-prometheus 6 | - appstudio-grafana 7 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: pipeline-service-storage 6 | namespace: openshift-gitops 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | destination: 11 | namespace: openshift-gitops 12 | server: https://kubernetes.default.svc 13 | source: 14 | path: developer/openshift/gitops/argocd/pipeline-service-storage 15 | repoURL: https://github.com/openshift-pipelines/pipeline-service.git 16 | targetRevision: main 17 | project: default 18 | syncPolicy: 19 | # Comment this out if you want to manually trigger deployments (using the 20 | # Argo CD Web UI or Argo CD CLI), rather than automatically deploying on 21 | # every new Git commit to your directory. 
22 | automated: 23 | prune: true 24 | selfHeal: true 25 | syncOptions: 26 | - CreateNamespace=true 27 | retry: 28 | limit: -1 # number of failed sync attempt retries; unlimited number of attempts if less than 0 29 | backoff: 30 | duration: 10s # the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h") 31 | factor: 2 # a factor to multiply the base duration after each failed retry 32 | maxDuration: 3m # the maximum amount of time allowed for the backoff strategy 33 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - postgres.yaml 7 | - minio 8 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/minio/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - operator 7 | - tenant 8 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/minio/operator/allow-argocd-to-manage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: openshift-minio-apply-tenants 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | - apiGroups: 10 | - minio.min.io 11 | resources: 12 | - tenants 13 | verbs: 14 | - "get" 15 | - "create" 16 | - "update" 17 | - "patch" 18 | - "delete" 19 | - apiGroups: 20 | - apps 21 | resources: 22 | - deployments 23 | verbs: 24 | - "*" 25 | - apiGroups: 26 | - "" 
27 | resources: 28 | - serviceaccounts 29 | - services 30 | verbs: 31 | - create 32 | --- 33 | apiVersion: rbac.authorization.k8s.io/v1 34 | kind: ClusterRoleBinding 35 | metadata: 36 | name: openshift-minio-apply-tenants 37 | annotations: 38 | argocd.argoproj.io/sync-wave: "0" 39 | roleRef: 40 | apiGroup: rbac.authorization.k8s.io 41 | kind: ClusterRole 42 | name: openshift-minio-apply-tenants 43 | subjects: 44 | - kind: ServiceAccount 45 | name: openshift-gitops-argocd-application-controller 46 | namespace: openshift-gitops 47 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/minio/operator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - allow-argocd-to-manage.yaml 6 | - minio.yaml 7 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/minio/operator/minio.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: minio-operator 6 | namespace: openshift-operators 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | channel: stable 11 | installPlanApproval: Automatic 12 | name: minio-operator 13 | source: certified-operators 14 | sourceNamespace: openshift-marketplace 15 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/minio/tenant/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - tenant.yaml 6 | 
-------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/minio/tenant/tenant.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: minio.min.io/v2 3 | kind: Tenant 4 | metadata: 5 | name: storage 6 | namespace: tekton-results 7 | labels: 8 | app: minio 9 | annotations: 10 | prometheus.io/path: /minio/v2/metrics/cluster 11 | prometheus.io/port: "9000" 12 | prometheus.io/scrape: "true" 13 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 14 | argocd.argoproj.io/sync-wave: "0" 15 | spec: 16 | exposeServices: 17 | minio: false 18 | features: 19 | bucketDNS: false 20 | domains: {} 21 | # This desired part doesn't work. Issues: 22 | # https://github.com/minio/operator/issues/1345 23 | # https://github.com/minio/operator/issues/1346 24 | # users: 25 | # - name: storage-user 26 | # buckets: 27 | # - name: "tekton-results" 28 | # region: "us-east-1" 29 | # objectLock: true 30 | certConfig: {} 31 | podManagementPolicy: Parallel 32 | configuration: 33 | name: minio-storage-configuration 34 | env: [] 35 | serviceMetadata: 36 | minioServiceLabels: {} 37 | minioServiceAnnotations: {} 38 | consoleServiceLabels: {} 39 | consoleServiceAnnotations: {} 40 | priorityClassName: "" 41 | externalCaCertSecret: [] 42 | externalCertSecret: [] 43 | externalClientCertSecrets: [] 44 | image: quay.io/minio/minio:RELEASE.2022-09-17T00-09-45Z 45 | imagePullSecret: {} 46 | mountPath: /export 47 | subPath: "" 48 | pools: 49 | - servers: 1 50 | name: pool-0 51 | volumesPerServer: 2 52 | nodeSelector: {} 53 | tolerations: [] 54 | affinity: 55 | nodeAffinity: {} 56 | podAffinity: {} 57 | podAntiAffinity: {} 58 | resources: {} 59 | volumeClaimTemplate: 60 | apiVersion: v1 61 | kind: persistentvolumeclaims 62 | metadata: {} 63 | spec: 64 | accessModes: 65 | - ReadWriteOnce 66 | resources: 67 | requests: 68 | storage: 1Gi 69 | status: {} 70 | 
securityContext: {} 71 | containerSecurityContext: {} 72 | requestAutoCert: true 73 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service-storage/postgres.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: postgres 6 | namespace: openshift-gitops 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | project: default 11 | destination: 12 | namespace: tekton-results 13 | server: https://kubernetes.default.svc 14 | source: 15 | chart: postgresql 16 | repoURL: https://charts.bitnami.com/bitnami 17 | targetRevision: 14.0.5 18 | helm: 19 | releaseName: postgres 20 | parameters: 21 | - name: image.tag 22 | value: 13.14.0 23 | - name: tls.enabled 24 | value: "true" 25 | - name: tls.certificatesSecret 26 | value: "postgresql-tls" 27 | - name: tls.certFilename 28 | value: "tls.crt" 29 | - name: tls.certKeyFilename 30 | value: "tls.key" 31 | # There is an unresolved issue with CA cert that stops pods from 32 | # starting due to readiness probe failure. 
The workaround is 33 | # discussed here along with the linked issues: 34 | # https://github.com/bitnami/charts/issues/8026 35 | # - name: tls.certCAFilename 36 | # value: "ca.crt" 37 | - name: auth.database 38 | value: "tekton_results" 39 | - name: auth.username 40 | value: "tekton" 41 | - name: auth.existingSecret 42 | value: "tekton-results-database" 43 | - name: auth.secretKeys.userPasswordKey 44 | value: "db.password" 45 | - name: auth.secretKeys.adminPasswordKey 46 | value: "db.password" 47 | - name: primary.resources.requests 48 | value: "null" 49 | - name: primary.podSecurityContext.fsGroup 50 | value: "null" 51 | - name: primary.podSecurityContext.seccompProfile.type 52 | value: RuntimeDefault 53 | - name: primary.containerSecurityContext.runAsUser 54 | value: "null" 55 | - name: primary.containerSecurityContext.allowPrivilegeEscalation 56 | value: "false" 57 | - name: primary.containerSecurityContext.runAsNonRoot 58 | value: "true" 59 | - name: primary.containerSecurityContext.seccompProfile.type 60 | value: RuntimeDefault 61 | - name: primary.containerSecurityContext.capabilities.drop[0] 62 | value: ALL 63 | - name: volumePermissions.enabled 64 | value: "false" 65 | - name: shmVolume.enabled 66 | value: "false" 67 | syncPolicy: 68 | # Comment this out if you want to manually trigger deployments (using the 69 | # Argo CD Web UI or Argo CD CLI), rather than automatically deploying on 70 | # every new Git commit to your directory. 71 | automated: 72 | prune: true 73 | selfHeal: true 74 | syncOptions: 75 | - CreateNamespace=false 76 | # workaround to make working both minio: newer and old 4.5.4 77 | - Validate=false 78 | retry: 79 | limit: -1 # number of failed sync attempt retries; unlimited number of attempts if less than 0 80 | backoff: 81 | duration: 10s # the amount to back off. Default unit is seconds, but could also be a duration (e.g. 
"2m", "1h") 82 | factor: 2 # a factor to multiply the base duration after each failed retry 83 | maxDuration: 3m # the maximum amount of time allowed for the backoff strategy 84 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: pipeline-service 6 | namespace: openshift-gitops 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | destination: 11 | namespace: openshift-gitops 12 | server: https://kubernetes.default.svc 13 | source: 14 | path: developer/openshift/gitops/argocd/pipeline-service 15 | repoURL: https://github.com/openshift-pipelines/pipeline-service.git 16 | targetRevision: main 17 | project: default 18 | syncPolicy: 19 | # Comment this out if you want to manually trigger deployments (using the 20 | # Argo CD Web UI or Argo CD CLI), rather than automatically deploying on 21 | # every new Git commit to your directory. 22 | automated: 23 | prune: true 24 | selfHeal: true 25 | syncOptions: 26 | - CreateNamespace=true 27 | retry: 28 | limit: -1 # number of failed sync attempt retries; unlimited number of attempts if less than 0 29 | backoff: 30 | duration: 10s # the amount to back off. Default unit is seconds, but could also be a duration (e.g. 
"2m", "1h") 31 | factor: 2 # a factor to multiply the base duration after each failed retry 32 | maxDuration: 3m # the maximum amount of time allowed for the backoff strategy 33 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - ../../../../../operator/gitops/argocd/pipeline-service 7 | 8 | patches: 9 | - path: tekton-results/minio-create-bucket.yaml 10 | - path: tekton-results/minio-tls.yaml 11 | 12 | # Skip applying the Tekton operands while the Tekton operator is being installed. 13 | # See more information about this option, here: 14 | # https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#skip-dry-run-for-new-custom-resources-types 15 | commonAnnotations: 16 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 17 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service/tekton-results/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ../../../../../../operator/gitops/argocd/pipeline-service/tekton-results 6 | 7 | patches: 8 | - path: minio-create-bucket.yaml 9 | - path: minio-tls.yaml 10 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service/tekton-results/minio-create-bucket.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-api 6 | namespace: tekton-results 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | 
spec: 10 | template: 11 | spec: 12 | containers: 13 | - name: api 14 | initContainers: 15 | - name: mc 16 | image: >- 17 | quay.io/minio/mc:RELEASE.2023-01-28T20-29-38Z 18 | command: 19 | - /bin/bash 20 | args: 21 | - '-c' 22 | - | 23 | mc --config-dir /tmp config host add minio "$S3_ENDPOINT" "$S3_ACCESS_KEY_ID" "$S3_SECRET_ACCESS_KEY" && 24 | if [ -z "$(mc --config-dir /tmp ls minio | grep "$S3_BUCKET_NAME")" ]; then 25 | mc --config-dir /tmp mb --with-lock --region "$S3_REGION" minio/"$S3_BUCKET_NAME" && 26 | echo "Minio bucket $S3_BUCKET_NAME successfully created." 27 | fi 28 | imagePullPolicy: Always 29 | env: 30 | - name: S3_ACCESS_KEY_ID 31 | valueFrom: 32 | secretKeyRef: 33 | key: aws_access_key_id 34 | name: tekton-results-s3 35 | - name: S3_SECRET_ACCESS_KEY 36 | valueFrom: 37 | secretKeyRef: 38 | key: aws_secret_access_key 39 | name: tekton-results-s3 40 | - name: S3_REGION 41 | valueFrom: 42 | secretKeyRef: 43 | key: aws_region 44 | name: tekton-results-s3 45 | - name: S3_BUCKET_NAME 46 | valueFrom: 47 | secretKeyRef: 48 | key: bucket 49 | name: tekton-results-s3 50 | - name: S3_ENDPOINT 51 | valueFrom: 52 | secretKeyRef: 53 | key: endpoint 54 | name: tekton-results-s3 55 | volumeMounts: 56 | - name: ca-s3 57 | mountPath: /etc/ssl/certs/s3-cert.crt 58 | subPath: s3-cert.crt 59 | - name: tmp-mc-volume 60 | mountPath: /tmp 61 | resources: 62 | requests: 63 | cpu: 5m 64 | memory: 32Mi 65 | limits: 66 | cpu: 100m 67 | memory: 128Mi 68 | securityContext: 69 | readOnlyRootFilesystem: true 70 | runAsNonRoot: true 71 | volumes: 72 | - name: tmp-mc-volume 73 | emptyDir: {} 74 | -------------------------------------------------------------------------------- /developer/openshift/gitops/argocd/pipeline-service/tekton-results/minio-tls.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-api 6 | namespace: tekton-results 7 | annotations: 8 | 
argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | template: 11 | spec: 12 | volumes: 13 | - name: ca-s3 14 | secret: 15 | secretName: storage-tls 16 | items: 17 | - key: public.crt 18 | path: s3-cert.crt 19 | containers: 20 | - name: api 21 | volumeMounts: 22 | - name: ca-s3 23 | mountPath: /etc/ssl/certs/s3-cert.crt 24 | subPath: s3-cert.crt 25 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - tekton-results 6 | - https://github.com/owner/repository.git/path/gitops/argocd?ref=branch 7 | patches: 8 | - path: patch-pipeline-service.yaml 9 | - path: patch-pipeline-service-storage.yaml 10 | - path: patch-pipeline-service-o11y.yaml 11 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/patch-pipeline-service-o11y.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: pipeline-service-o11y 6 | namespace: openshift-gitops 7 | spec: 8 | source: 9 | repoURL: https://github.com/Roming22/pipeline-service.git 10 | targetRevision: main 11 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/patch-pipeline-service-storage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: pipeline-service-storage 6 | namespace: openshift-gitops 7 | spec: 8 | source: 9 | repoURL: https://github.com/Roming22/pipeline-service.git 10 | targetRevision: main 11 | -------------------------------------------------------------------------------- 
/developer/openshift/gitops/local/patch-pipeline-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: pipeline-service 6 | namespace: openshift-gitops 7 | spec: 8 | source: 9 | repoURL: https://github.com/Roming22/pipeline-service.git 10 | targetRevision: main 11 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/tekton-results/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - namespace.yaml 6 | - tekton-results-db-secret.yaml 7 | - tekton-results-s3-secret.yaml 8 | - tekton-results-minio-config.yaml 9 | - rds-db-cert-configmap.yaml 10 | - tekton-results-postgresql-tls-secret.yaml 11 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/tekton-results/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: tekton-results 6 | labels: 7 | argocd.argoproj.io/managed-by: openshift-gitops 8 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/tekton-results/rds-db-cert-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: rds-root-crt 6 | namespace: tekton-results 7 | data: 8 | # contents of the public certificate should be inserted here 9 | # the name of the key must be same as provided in the tekton results .env config 10 | tekton-results-db-ca.pem: 11 | -------------------------------------------------------------------------------- 
/developer/openshift/gitops/local/tekton-results/tekton-results-db-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | # Host: postgres-postgresql.tekton-results.svc.cluster.local 5 | db.host: cG9zdGdyZXMtcG9zdGdyZXNxbC50ZWt0b24tcmVzdWx0cy5zdmMuY2x1c3Rlci5sb2NhbA== 6 | # Name: tekton_results 7 | db.name: dGVrdG9uX3Jlc3VsdHM= 8 | db.password: 9 | db.user: 10 | kind: Secret 11 | metadata: 12 | name: tekton-results-database 13 | namespace: tekton-results 14 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/tekton-results/tekton-results-minio-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | data: 5 | config.env: 6 | metadata: 7 | name: minio-storage-configuration 8 | namespace: tekton-results 9 | type: Opaque 10 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/tekton-results/tekton-results-postgresql-tls-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: postgresql-tls 6 | namespace: tekton-results 7 | data: 8 | # Provide CA, TLS cert and key. 
CA cert is not being used until readiness 9 | # probe issue is resolved with the bitnami/postgresql chart 10 | ca.crt: 11 | tls.crt: 12 | tls.key: 13 | -------------------------------------------------------------------------------- /developer/openshift/gitops/local/tekton-results/tekton-results-s3-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | data: 4 | aws_access_key_id: 5 | # Region: not-applicable 6 | aws_region: bm90LWFwcGxpY2FibGU= 7 | aws_secret_access_key: 8 | # Bucket: bucket=tekton-results 9 | bucket: dGVrdG9uLXJlc3VsdHM= 10 | # Endpoint: https://minio.tekton-results.svc.cluster.local 11 | endpoint: aHR0cHM6Ly9taW5pby50ZWt0b24tcmVzdWx0cy5zdmMuY2x1c3Rlci5sb2NhbA== 12 | kind: Secret 13 | metadata: 14 | name: tekton-results-s3 15 | namespace: tekton-results 16 | -------------------------------------------------------------------------------- /developer/openshift/operators/openshift-gitops/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - openshift-gitops.yaml 6 | -------------------------------------------------------------------------------- /developer/openshift/operators/openshift-gitops/openshift-gitops.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: openshift-gitops-operator 6 | namespace: openshift-operators 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | channel: latest 11 | installPlanApproval: Automatic 12 | name: openshift-gitops-operator 13 | source: redhat-operators 14 | sourceNamespace: openshift-marketplace 15 | -------------------------------------------------------------------------------- /operator/gitops/README.md: 
-------------------------------------------------------------------------------- 1 | # Purpose 2 | 3 | This directory contains the manifests used to automate the installation of operators and other components leveraged by Pipeline Service on the workload clusters. It provides an opinionated approach to managing their configuration based on GitOps principles. 4 | 5 | ## Why GitOps? 6 | 7 | We want to make the onboarding experience to use Pipeline Service as easy and customizable as possible. With that in mind, Pipeline Service is built around the principles of GitOps. Using kustomize, users will be able to set up, modify and update cluster resources without having to disrupt their existing setup. We provide base kustomization.yaml files to help get started, so that users can then add their customizations in the overlay/kustomization.yaml files. 8 | 9 | ## Dependencies 10 | 11 | Before installing the prerequisites, refer to [DEPENDENCIES.md](../../DEPENDENCIES.md) to verify the versions of products, operators and tools used in Pipeline Service. 12 | 13 | ## Components 14 | 15 | Pipeline Service is composed of the following components, which can be deployed via `kustomize` or referenced in an ArgoCD application: 16 | 17 | - `pipeline-service` - the core components that make up the service. Deploys the following: 18 | - OpenShift Pipelines operator 19 | - Pipelines as Code 20 | - Tekton Chains 21 | - Tekton Results 22 | - Tekton Metrics Exporter 23 | - `grafana` - optional Grafana dashboard for monitoring. 
24 | -------------------------------------------------------------------------------- /operator/gitops/argocd/grafana/dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: grafana.integreatly.org/v1beta1 3 | kind: GrafanaDashboard 4 | metadata: 5 | name: grafana-dashboard-pipeline-service 6 | labels: 7 | app: appstudio-grafana 8 | annotations: 9 | argocd.argoproj.io/sync-wave: "0" 10 | spec: 11 | instanceSelector: 12 | matchLabels: 13 | dashboards: "appstudio-grafana" 14 | configMapRef: 15 | name: grafana-dashboard-pipeline-service 16 | key: pipeline-service-dashboard.json 17 | -------------------------------------------------------------------------------- /operator/gitops/argocd/grafana/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Kustomization 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | 5 | namespace: o11y 6 | 7 | configMapGenerator: 8 | - name: grafana-dashboard-pipeline-service 9 | files: 10 | - dashboards/pipeline-service-dashboard.json 11 | 12 | resources: 13 | - dashboard.yaml 14 | -------------------------------------------------------------------------------- /operator/gitops/argocd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - pipeline-service.yaml 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: argoproj.io/v1alpha1 3 | kind: Application 4 | metadata: 5 | name: pipeline-service 6 | namespace: openshift-gitops 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | destination: 11 | namespace: openshift-gitops 12 | server: https://kubernetes.default.svc 13 
| source: 14 | path: operator/gitops/argocd/pipeline-service 15 | repoURL: https://github.com/openshift-pipelines/pipeline-service.git 16 | targetRevision: main 17 | project: default 18 | syncPolicy: 19 | # Comment this out if you want to manually trigger deployments (using the 20 | # Argo CD Web UI or Argo CD CLI), rather than automatically deploying on 21 | # every new Git commit to your directory. 22 | automated: 23 | prune: true 24 | selfHeal: true 25 | syncOptions: 26 | - CreateNamespace=true 27 | retry: 28 | limit: -1 # number of failed sync attempt retries; unlimited number of attempts if less than 0 29 | backoff: 30 | duration: 10s # the amount to back off. Default unit is seconds, but could also be a duration (e.g. "2m", "1h") 31 | factor: 2 # a factor to multiply the base duration after each failed retry 32 | maxDuration: 3m # the maximum amount of time allowed for the backoff strategy 33 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - openshift-pipelines 7 | - tekton-results 8 | - metrics-exporter 9 | 10 | # Skip applying the Tekton operands while the Tekton operator is being installed. 
11 | # See more information about this option, here: 12 | # https://argo-cd.readthedocs.io/en/stable/user-guide/sync-options/#skip-dry-run-for-new-custom-resources-types 13 | commonAnnotations: 14 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 15 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/metrics-exporter/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: pipeline-service-exporter-reader 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | - apiGroups: [""] 10 | resources: ["pods", "services", "namespaces", "endpoints"] 11 | verbs: ["get", "list", "watch"] 12 | 13 | - apiGroups: 14 | - apiextensions.k8s.io 15 | resources: 16 | - customresourcedefinitions 17 | verbs: 18 | - get 19 | 20 | - apiGroups: ["tekton.dev", "resolution.tekton.dev"] 21 | resources: ["pipelineruns", "taskruns", "resolutionrequests"] 22 | verbs: ["get", "list", "watch", "patch"] 23 | 24 | - nonResourceURLs: 25 | - "/metrics" 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/metrics-exporter/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: pipeline-service-exporter-reader-binding 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: pipeline-service-exporter-reader 12 | subjects: 13 | - kind: ServiceAccount 14 | name: pipeline-service-exporter 15 | namespace: openshift-pipelines 16 | -------------------------------------------------------------------------------- 
/operator/gitops/argocd/pipeline-service/metrics-exporter/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: pipeline-metrics-exporter 6 | namespace: openshift-pipelines 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: pipeline-metrics-exporter 14 | template: 15 | metadata: 16 | labels: 17 | app: pipeline-metrics-exporter 18 | spec: 19 | serviceAccountName: pipeline-service-exporter 20 | containers: 21 | - name: pipeline-metrics-exporter 22 | image: quay.io/konflux-ci/pipeline-service-exporter:placeholder 23 | args: 24 | [ 25 | "-pprof-address", 26 | "6060", 27 | ] 28 | ports: 29 | - containerPort: 9117 30 | name: metrics 31 | resources: 32 | requests: 33 | memory: "128Mi" 34 | cpu: "250m" 35 | limits: 36 | memory: "512Mi" 37 | cpu: "500m" 38 | securityContext: 39 | readOnlyRootFilesystem: true 40 | runAsNonRoot: true 41 | restartPolicy: Always 42 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/metrics-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - serviceaccount.yaml 4 | - clusterrole.yaml 5 | - clusterrolebinding.yaml 6 | - deployment.yaml 7 | - service.yaml 8 | - servicemonitor.yaml 9 | 10 | images: 11 | - name: quay.io/konflux-ci/pipeline-service-exporter 12 | newName: quay.io/konflux-ci/pipeline-service-exporter 13 | newTag: 9d2439c8a77d2ce0527cc5aea3fc6561b7671b48 14 | 15 | apiVersion: kustomize.config.k8s.io/v1beta1 16 | kind: Kustomization 17 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/metrics-exporter/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pipeline-metrics-exporter-service 6 | namespace: openshift-pipelines 7 | labels: 8 | app: pipeline-metrics-exporter 9 | annotations: 10 | argocd.argoproj.io/sync-wave: "0" 11 | spec: 12 | selector: 13 | app: pipeline-metrics-exporter 14 | ports: 15 | - name: metrics 16 | port: 9117 17 | targetPort: 9117 18 | protocol: TCP 19 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/metrics-exporter/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: pipeline-service-exporter 6 | namespace: openshift-pipelines 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/metrics-exporter/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # for RHTAP, the pipeline-service monitor is defined in infra-deployments, but we define here in our developer folder (vs. 
the operator folder) to define this out of dev_setup.sh 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | name: pipeline-service 7 | namespace: openshift-pipelines 8 | annotations: 9 | argocd.argoproj.io/sync-wave: "0" 10 | spec: 11 | jobLabel: app 12 | namespaceSelector: 13 | matchNames: 14 | - openshift-pipelines 15 | endpoints: 16 | - path: /metrics 17 | port: metrics 18 | interval: 15s 19 | scheme: http 20 | honorLabels: true 21 | selector: 22 | matchLabels: 23 | app: pipeline-metrics-exporter 24 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/allow-argocd-to-manage-jobs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: openshift-gitops-jobs-admin 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | - apiGroups: 10 | - batch 11 | resources: 12 | - jobs 13 | verbs: 14 | - get 15 | - list 16 | - patch 17 | - create 18 | - delete 19 | --- 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: ClusterRoleBinding 22 | metadata: 23 | name: openshift-gitops-jobs-admin 24 | annotations: 25 | argocd.argoproj.io/sync-wave: "0" 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: openshift-gitops-jobs-admin 30 | subjects: 31 | - kind: ServiceAccount 32 | name: openshift-gitops-argocd-application-controller 33 | namespace: openshift-gitops 34 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/allow-argocd-to-manage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: openshift-gitops-apply-tekton-config-parameters 6 | annotations: 7 | 
argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | - apiGroups: 10 | - monitoring.coreos.com 11 | resources: 12 | - servicemonitors 13 | verbs: 14 | - get 15 | - list 16 | - patch 17 | - create 18 | - delete 19 | - apiGroups: 20 | - operator.tekton.dev 21 | resources: 22 | - tektonconfigs 23 | verbs: 24 | - get 25 | - list 26 | - patch 27 | - create 28 | - delete 29 | - apiGroups: 30 | - security.openshift.io 31 | resources: 32 | - securitycontextconstraints 33 | verbs: 34 | - get 35 | - list 36 | - patch 37 | - create 38 | - delete 39 | --- 40 | apiVersion: rbac.authorization.k8s.io/v1 41 | kind: ClusterRoleBinding 42 | metadata: 43 | name: openshift-gitops-apply-tekton-config-parameters 44 | annotations: 45 | argocd.argoproj.io/sync-wave: "0" 46 | roleRef: 47 | apiGroup: rbac.authorization.k8s.io 48 | kind: ClusterRole 49 | name: openshift-gitops-apply-tekton-config-parameters 50 | subjects: 51 | - kind: ServiceAccount 52 | name: openshift-gitops-argocd-application-controller 53 | namespace: openshift-gitops 54 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/appstudio-pipelines-scc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: SecurityContextConstraints 3 | metadata: 4 | name: appstudio-pipelines-scc 5 | annotations: 6 | argocd.argoproj.io/sync-wave: "0" 7 | allowHostDirVolumePlugin: false 8 | allowHostIPC: false 9 | allowHostNetwork: false 10 | allowHostPID: false 11 | allowHostPorts: false 12 | allowPrivilegeEscalation: false 13 | allowPrivilegedContainer: false 14 | allowedCapabilities: 15 | - SETFCAP 16 | apiVersion: security.openshift.io/v1 17 | defaultAddCapabilities: null 18 | fsGroup: 19 | type: MustRunAs 20 | groups: 21 | - system:cluster-admins 22 | priority: 10 23 | readOnlyRootFilesystem: false 24 | requiredDropCapabilities: 25 | - MKNOD 26 | runAsUser: 27 | type: RunAsAny 28 | seLinuxContext: 29 | 
type: MustRunAs 30 | supplementalGroups: 31 | type: RunAsAny 32 | users: [] 33 | volumes: 34 | - configMap 35 | - downwardAPI 36 | - emptyDir 37 | - persistentVolumeClaim 38 | - projected 39 | - secret 40 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/chains-observability-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: tekton-chains 6 | namespace: openshift-pipelines 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | ignore-check.kube-linter.io/dangling-service: This service is not dangling, it exposes metric for an OSP deployment 10 | labels: 11 | app.kubernetes.io/part-of: tekton-chains 12 | app.kubernetes.io/component: metrics 13 | app: tekton-chains-controller 14 | spec: 15 | ports: 16 | - name: metrics 17 | port: 9090 18 | protocol: TCP 19 | targetPort: 9090 20 | selector: 21 | app.kubernetes.io/instance: default 22 | app.kubernetes.io/part-of: tekton-chains 23 | app.kubernetes.io/component: controller 24 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/chains-public-key-viewer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: tekton-chains-public-key-viewer 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - secrets 13 | resourceNames: 14 | - public-key 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | --- 20 | # public-key access 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: RoleBinding 23 | metadata: 24 | name: tekton-chains-public-key-viewer 25 | namespace: openshift-pipelines 26 | annotations: 27 | argocd.argoproj.io/sync-wave: "0" 28 
| roleRef: 29 | apiGroup: rbac.authorization.k8s.io 30 | kind: ClusterRole 31 | name: tekton-chains-public-key-viewer 32 | subjects: 33 | - apiGroup: rbac.authorization.k8s.io 34 | kind: Group 35 | name: system:authenticated 36 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/chains-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: tekton-chains-controller 6 | namespace: openshift-pipelines 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "1" 9 | spec: 10 | jobLabel: "app.kubernetes.io/name" 11 | endpoints: 12 | - path: /metrics 13 | port: metrics 14 | interval: 15s 15 | scheme: http 16 | honorLabels: true 17 | namespaceSelector: 18 | matchNames: 19 | - openshift-pipelines 20 | selector: 21 | matchLabels: 22 | app.kubernetes.io/part-of: tekton-chains 23 | app.kubernetes.io/component: metrics 24 | app: tekton-chains-controller 25 | targetLabels: 26 | - app 27 | - app.kubernetes.io/component 28 | - app.kubernetes.io/part-of 29 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - allow-argocd-to-manage.yaml 6 | - allow-argocd-to-manage-jobs.yaml 7 | - appstudio-pipelines-scc.yaml 8 | - openshift-operator.yaml 9 | - tekton-config.yaml 10 | - chains-service-monitor.yaml 11 | - bugfix-pac-gitauth-secrets.yaml 12 | # Manually add ConfigMap and Service until PLNSRVCE-1359 is fixed 13 | - chains-observability-service.yaml 14 | - chains-public-key-viewer.yaml 15 | - chains-secrets-config.yaml 16 | - namespace.yaml 17 | - 
osp-nightly-catalog-source.yaml 18 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/namespace.yaml: -------------------------------------------------------------------------------- 1 | # Solves https://issues.redhat.com/browse/PLNSRVCE-1620 2 | --- 3 | apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | name: openshift-pipelines 7 | labels: 8 | argocd.argoproj.io/managed-by: openshift-gitops 9 | annotations: 10 | argocd.argoproj.io/sync-wave: "-1" 11 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/openshift-operator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: Subscription 4 | metadata: 5 | name: openshift-pipelines-operator 6 | namespace: openshift-operators 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | channel: latest 11 | name: openshift-pipelines-operator-rh 12 | source: custom-operators 13 | sourceNamespace: openshift-marketplace 14 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/openshift-pipelines/osp-nightly-catalog-source.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operators.coreos.com/v1alpha1 3 | kind: CatalogSource 4 | metadata: 5 | name: custom-operators 6 | namespace: openshift-marketplace 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | sourceType: grpc 11 | image: quay.io/openshift-pipeline/openshift-pipelines-pipelines-operator-bundle-container-index@sha256:99d1e1ba1c24d950db7147e26041193304247ed92e88788023b58eb787282a9a 12 | displayName: custom-operators 13 | updateStrategy: 14 | registryPoll: 15 | interval: 30m 16 | 
-------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/README.md: -------------------------------------------------------------------------------- 1 | # Tekton Results 2 | 3 | Tekton Results aims to help users logically group CI/CD workload history and separate out long term result storage away from the Pipeline controller. This allows you to: 4 | 5 | - Provide custom Result Metadata about your CI/CD workflows that aren't available in the Tekton TaskRun and PipelineRun CRDs (such as post-run actions). 6 | - Group related workloads together (e.g., bundle related TaskRuns and PipelineRuns into a single unit). 7 | - Separate long-term result history from the Pipeline CRD controller, freeing up etcd resources for Run execution. 8 | 9 | ## Description 10 | 11 | This will install Tekton results on the cluster to gather Tekton PipelineRun and TaskRun results (status and logs) for long term storage. 12 | Installation is based on the [installation instructions](https://github.com/tektoncd/results/blob/main/docs/install.md) from upstream Tekton results. The image is built using the [downstream](https://github.com/openshift-pipelines/tektoncd-results) fork of Tekton Results and stored in quay.io/konflux-ci/tekton-results-api and quay.io/konflux-ci/tekton-results-watcher, and referenced using the latest commit hash on the downstream repo. 13 | In a [dev environment](../../../../developer/README.md), a PostgreSQL database is installed so that results can be stored on the cluster. Otherwise, Tekton results will use a configurable external database, such as Amazon's RDS. 
14 | More information can be found [here](https://github.com/tektoncd/results#readme) 15 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/api-db-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-api 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: api 13 | env: 14 | - name: DB_USER 15 | valueFrom: 16 | secretKeyRef: 17 | name: tekton-results-database 18 | key: db.user 19 | - name: DB_PASSWORD 20 | valueFrom: 21 | secretKeyRef: 22 | name: tekton-results-database 23 | key: db.password 24 | - name: DB_HOST 25 | value: 26 | valueFrom: 27 | secretKeyRef: 28 | name: tekton-results-database 29 | key: db.host 30 | - name: DB_NAME 31 | value: 32 | valueFrom: 33 | secretKeyRef: 34 | name: tekton-results-database 35 | key: db.name 36 | volumeMounts: 37 | - name: db-tls-ca 38 | mountPath: /etc/tls/db 39 | readOnly: true 40 | volumes: 41 | - name: db-tls-ca 42 | configMap: 43 | name: rds-root-crt 44 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/api-kube-rbac-proxy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-api 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: kube-rbac-proxy 11 | image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.12 12 | args: 13 | - "--secure-listen-address=0.0.0.0:9443" 14 | - "--upstream=http://127.0.0.1:9090/" 15 | - "--logtostderr=true" 16 | - "--v=6" 17 | securityContext: 18 | allowPrivilegeEscalation: false 19 | readOnlyRootFilesystem: true 20 | runAsNonRoot: true 21 | seccompProfile: 22 | type: RuntimeDefault 23 | 
capabilities: 24 | drop: 25 | - ALL 26 | ports: 27 | - containerPort: 9443 28 | protocol: TCP 29 | name: metrics 30 | resources: 31 | limits: 32 | cpu: 500m 33 | memory: 128Mi 34 | requests: 35 | cpu: 5m 36 | memory: 64Mi 37 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/api-route.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Route 3 | apiVersion: route.openshift.io/v1 4 | metadata: 5 | name: tekton-results 6 | labels: 7 | app.kubernetes.io/part-of: tekton-results 8 | annotations: 9 | openshift.io/host.generated: "true" 10 | haproxy.router.openshift.io/hsts_header: "max-age=63072000" 11 | # https://issues.redhat.com/browse/OCPBUGS-16375 12 | router.openshift.io/haproxy.health.check.interval: "86400s" 13 | haproxy.router.openshift.io/timeout: "86410s" 14 | argocd.argoproj.io/sync-wave: "0" 15 | spec: 16 | to: 17 | kind: Service 18 | name: tekton-results-api-service 19 | weight: 100 20 | port: 21 | # tekton-results now supports both REST and gRPC traffic on same port 22 | targetPort: server 23 | tls: 24 | termination: reencrypt 25 | insecureEdgeTerminationPolicy: Redirect 26 | wildcardPolicy: None 27 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/api-s3-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-api 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: api 13 | env: 14 | - name: LOGS_API 15 | value: "true" 16 | - name: LOGS_TYPE 17 | value: S3 18 | - name: S3_HOSTNAME_IMMUTABLE 19 | value: "true" 20 | - name: S3_ACCESS_KEY_ID 21 | valueFrom: 22 | secretKeyRef: 23 | key: aws_access_key_id 24 | name: tekton-results-s3 25 | - 
name: S3_SECRET_ACCESS_KEY 26 | valueFrom: 27 | secretKeyRef: 28 | key: aws_secret_access_key 29 | name: tekton-results-s3 30 | - name: S3_REGION 31 | valueFrom: 32 | secretKeyRef: 33 | key: aws_region 34 | name: tekton-results-s3 35 | - name: S3_BUCKET_NAME 36 | valueFrom: 37 | secretKeyRef: 38 | key: bucket 39 | name: tekton-results-s3 40 | - name: S3_ENDPOINT 41 | valueFrom: 42 | secretKeyRef: 43 | key: endpoint 44 | name: tekton-results-s3 45 | resources: 46 | requests: 47 | cpu: 100m 48 | memory: 512Mi 49 | limits: 50 | cpu: 100m 51 | memory: 512Mi 52 | securityContext: 53 | readOnlyRootFilesystem: true 54 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/api-service-patch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: replace 3 | path: /spec/ports/1/name 4 | value: metrics 5 | - op: replace 6 | path: /spec/ports/1/port 7 | value: 9443 8 | - op: replace 9 | path: /spec/ports/1/targetPort 10 | value: metrics 11 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/api-service-sync.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: api-service 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "1" 8 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/api-service-tls.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: api-service 6 | annotations: 7 | service.beta.openshift.io/serving-cert-secret-name: tekton-results-tls 8 | argocd.argoproj.io/sync-wave: "0" 9 | -------------------------------------------------------------------------------- 
/operator/gitops/argocd/pipeline-service/tekton-results/api-sync.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-api 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "1" 8 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/config.env: -------------------------------------------------------------------------------- 1 | DB_USER= 2 | DB_PASSWORD= 3 | DB_HOST= 4 | DB_PORT=5432 5 | DB_NAME= 6 | DB_SSLMODE=verify-full 7 | DB_SSLROOTCERT=/etc/tls/db/tekton-results-db-ca.pem 8 | DB_ENABLE_AUTO_MIGRATION=true 9 | SERVER_PORT=8080 10 | PROMETHEUS_PORT=9090 11 | PROMETHEUS_HISTOGRAM=true 12 | TLS_PATH=/etc/tls 13 | AUTH_DISABLE=false 14 | AUTH_IMPERSONATE=true 15 | LOG_LEVEL=info 16 | LOGS_API=false 17 | LOGS_TYPE=File 18 | LOGS_BUFFER_SIZE=5242880 19 | LOGS_PATH=/logs 20 | S3_BUCKET_NAME= 21 | S3_ENDPOINT= 22 | S3_HOSTNAME_IMMUTABLE=false 23 | S3_REGION= 24 | S3_ACCESS_KEY_ID= 25 | S3_SECRET_ACCESS_KEY= 26 | S3_MULTI_PART_SIZE=5242880 27 | GCS_BUCKET_NAME= 28 | STORAGE_EMULATOR_HOST= 29 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: tekton-results 5 | resources: 6 | - https://github.com/openshift-pipelines/tektoncd-results.git/config/overlays/base-only/?ref=e35af9274c0df84386b73aae8df0ad496ad175df 7 | - namespace.yaml 8 | - api-route.yaml 9 | - watcher-logging-rbac.yaml 10 | - service-monitor.yaml 11 | - watcher-rbac.yaml 12 | 13 | images: 14 | - name: ko://github.com/tektoncd/results/cmd/api 15 | newName: quay.io/konflux-ci/tekton-results-api 16 | newTag: 
e35af9274c0df84386b73aae8df0ad496ad175df 17 | - name: ko://github.com/tektoncd/results/cmd/watcher 18 | newName: quay.io/konflux-ci/tekton-results-watcher 19 | newTag: e35af9274c0df84386b73aae8df0ad496ad175df 20 | 21 | # generate a new configmap with updated values (logs api, db ssl mode) and replace the default one 22 | configMapGenerator: 23 | - behavior: replace 24 | files: 25 | - config.env 26 | name: api-config 27 | options: 28 | disableNameSuffixHash: true 29 | 30 | patches: 31 | - path: api-db-config.yaml 32 | - path: api-s3-config.yaml 33 | - path: api-sync.yaml 34 | - path: api-service-sync.yaml 35 | - path: api-service-tls.yaml 36 | - path: watcher-config.yaml 37 | - path: watcher-logging.yaml 38 | - path: watcher-sync.yaml 39 | - path: watcher-service-sync.yaml 40 | - path: api-kube-rbac-proxy.yaml 41 | - path: watcher-kube-rbac-proxy.yaml 42 | - path: watcher-service-patch.yaml 43 | target: 44 | version: v1 45 | kind: Service 46 | name: tekton-results-watcher 47 | labelSelector: "app.kubernetes.io/name=tekton-results-watcher" 48 | - path: api-service-patch.yaml 49 | target: 50 | version: v1 51 | kind: Service 52 | name: tekton-results-api-service 53 | labelSelector: "app.kubernetes.io/name=tekton-results-api" 54 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: tekton-results 6 | labels: 7 | argocd.argoproj.io/managed-by: openshift-gitops 8 | annotations: 9 | # Solves https://issues.redhat.com/browse/PLNSRVCE-1620 10 | argocd.argoproj.io/sync-wave: "-1" 11 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 
2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: metrics-reader 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | --- 9 | apiVersion: v1 10 | kind: Secret 11 | metadata: 12 | name: metrics-reader 13 | annotations: 14 | kubernetes.io/service-account.name: metrics-reader 15 | argocd.argoproj.io/sync-wave: "0" 16 | type: kubernetes.io/service-account-token 17 | --- 18 | apiVersion: rbac.authorization.k8s.io/v1 19 | kind: ClusterRole 20 | metadata: 21 | name: tekton-results-service-metrics-reader 22 | annotations: 23 | argocd.argoproj.io/sync-wave: "0" 24 | rules: 25 | - nonResourceURLs: 26 | - /metrics 27 | verbs: 28 | - get 29 | --- 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: ClusterRoleBinding 32 | metadata: 33 | name: prometheus-tekton-results-service-metrics-reader 34 | annotations: 35 | argocd.argoproj.io/sync-wave: "0" 36 | roleRef: 37 | apiGroup: rbac.authorization.k8s.io 38 | kind: ClusterRole 39 | name: tekton-results-service-metrics-reader 40 | subjects: 41 | - kind: ServiceAccount 42 | name: metrics-reader 43 | --- 44 | apiVersion: monitoring.coreos.com/v1 45 | kind: ServiceMonitor 46 | metadata: 47 | name: tekton-results-api 48 | annotations: 49 | argocd.argoproj.io/sync-wave: "0" 50 | spec: 51 | jobLabel: "app.kubernetes.io/name" 52 | endpoints: 53 | - path: /metrics 54 | port: metrics 55 | scheme: https 56 | bearerTokenSecret: 57 | name: "metrics-reader" 58 | key: token 59 | tlsConfig: 60 | insecureSkipVerify: true 61 | selector: 62 | matchLabels: 63 | app.kubernetes.io/name: "tekton-results-api" 64 | --- 65 | apiVersion: monitoring.coreos.com/v1 66 | kind: ServiceMonitor 67 | metadata: 68 | name: tekton-results-watcher 69 | annotations: 70 | argocd.argoproj.io/sync-wave: "0" 71 | spec: 72 | endpoints: 73 | - path: /metrics 74 | port: watchermetrics 75 | scheme: https 76 | bearerTokenSecret: 77 | name: "metrics-reader" 78 | key: token 79 | tlsConfig: 80 | insecureSkipVerify: true 81 | selector: 82 | matchLabels: 
83 | app.kubernetes.io/name: tekton-results-watcher 84 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/watcher-config.yaml: -------------------------------------------------------------------------------- 1 | # Adjusting args to tekton-results namespace 2 | --- 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: tekton-results-watcher 7 | annotations: 8 | argocd.argoproj.io/sync-wave: "0" 9 | spec: 10 | replicas: 1 11 | template: 12 | spec: 13 | affinity: 14 | nodeAffinity: 15 | requiredDuringSchedulingIgnoredDuringExecution: 16 | nodeSelectorTerms: 17 | - matchExpressions: 18 | - key: kubernetes.io/os 19 | operator: NotIn 20 | values: 21 | - windows 22 | podAntiAffinity: 23 | preferredDuringSchedulingIgnoredDuringExecution: 24 | - podAffinityTerm: 25 | labelSelector: 26 | matchLabels: 27 | app.kubernetes.io/name: tekton-results-watcher 28 | topologyKey: kubernetes.io/hostname 29 | weight: 100 30 | containers: 31 | - name: watcher 32 | args: 33 | [ 34 | "-api_addr", 35 | "tekton-results-api-service.tekton-results.svc.cluster.local:8080", 36 | "-auth_mode", 37 | "token", 38 | "-check_owner=false", 39 | "-completed_run_grace_period", 40 | "10m", 41 | ] 42 | resources: 43 | requests: 44 | cpu: 250m 45 | memory: 2Gi 46 | limits: 47 | cpu: 250m 48 | memory: 2Gi 49 | securityContext: 50 | readOnlyRootFilesystem: true 51 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/watcher-kube-rbac-proxy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-watcher 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: kube-rbac-proxy 11 | image: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.12 12 | args: 13 | - "--secure-listen-address=0.0.0.0:8443" 
14 | - "--upstream=http://127.0.0.1:9090/" 15 | - "--logtostderr=true" 16 | - "--v=6" 17 | securityContext: 18 | allowPrivilegeEscalation: false 19 | readOnlyRootFilesystem: true 20 | runAsNonRoot: true 21 | seccompProfile: 22 | type: RuntimeDefault 23 | capabilities: 24 | drop: 25 | - ALL 26 | ports: 27 | - containerPort: 8443 28 | protocol: TCP 29 | name: watchermetrics 30 | resources: 31 | limits: 32 | cpu: 500m 33 | memory: 128Mi 34 | requests: 35 | cpu: 5m 36 | memory: 64Mi 37 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/watcher-logging-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: tekton-results-watcher-logs 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | subjects: 9 | - kind: ServiceAccount 10 | name: tekton-results-watcher 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: tekton-results-admin 15 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/watcher-logging.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: tekton-results-config-logging 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | data: 9 | # Adjust zap-logger config according to ADR 6 10 | zap-logger-config: | 11 | { 12 | "level": "info", 13 | "development": false, 14 | "outputPaths": ["stdout"], 15 | "errorOutputPaths": ["stderr"], 16 | "encoding": "json", 17 | "encoderConfig": { 18 | "timeKey": "ts", 19 | "levelKey": "level", 20 | "nameKey": "logger", 21 | "callerKey": "caller", 22 | "messageKey": "msg", 23 | "stacktraceKey": "stacktrace", 24 | "lineEnding": "", 25 | "levelEncoder": "", 26 | 
"timeEncoder": "iso8601", 27 | "durationEncoder": "string", 28 | "callerEncoder": "" 29 | } 30 | } 31 | # Adjust logging level of the watcher and controller 32 | loglevel.watcher: info 33 | loglevel.controller: info 34 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/watcher-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: tekton-results-watcher-rbac 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "0" 8 | rules: 9 | # Watcher needs to be able to verify incoming auth tokens. 10 | - apiGroups: ["authentication.k8s.io"] 11 | resources: ["tokenreviews"] 12 | verbs: ["create"] 13 | # Watcher needs to be able to use RBAC to verify user authorization. 14 | - apiGroups: ["authorization.k8s.io"] 15 | resources: ["subjectaccessreviews"] 16 | verbs: ["create"] 17 | 18 | --- 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | kind: ClusterRoleBinding 21 | metadata: 22 | name: tekton-results-watcher-rbac 23 | annotations: 24 | argocd.argoproj.io/sync-wave: "0" 25 | subjects: 26 | - kind: ServiceAccount 27 | name: watcher 28 | roleRef: 29 | apiGroup: rbac.authorization.k8s.io 30 | kind: ClusterRole 31 | name: tekton-results-watcher-rbac 32 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/watcher-service-patch.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - op: replace 3 | path: /spec/ports/0/name 4 | value: watchermetrics 5 | - op: replace 6 | path: /spec/ports/0/port 7 | value: 8443 8 | - op: add 9 | path: /spec/ports/0/targetPort 10 | value: watchermetrics 11 | -------------------------------------------------------------------------------- 
/operator/gitops/argocd/pipeline-service/tekton-results/watcher-service-sync.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: tekton-results-watcher 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "2" 8 | -------------------------------------------------------------------------------- /operator/gitops/argocd/pipeline-service/tekton-results/watcher-sync.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: tekton-results-watcher 6 | annotations: 7 | argocd.argoproj.io/sync-wave: "2" 8 | -------------------------------------------------------------------------------- /operator/gitops/compute/pipeline-service-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | resources: 3 | - namespace.yaml 4 | - pipeline-service-manager.yaml 5 | - role.yaml 6 | - rolebinding.yaml 7 | -------------------------------------------------------------------------------- /operator/gitops/compute/pipeline-service-manager/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: pipeline-service 6 | -------------------------------------------------------------------------------- /operator/gitops/compute/pipeline-service-manager/pipeline-service-manager.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: pipeline-service-manager 6 | namespace: pipeline-service 7 | --- 8 | apiVersion: v1 9 | kind: Secret 10 | type: kubernetes.io/service-account-token 11 | metadata: 12 | name: pipeline-service-manager-token 13 | namespace: pipeline-service 14 | annotations: 15 | 
kubernetes.io/service-account.name: pipeline-service-manager 16 | -------------------------------------------------------------------------------- /operator/gitops/compute/pipeline-service-manager/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: pipeline-service-admin 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | - pods/logs 12 | - pods/log 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - namespaces 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | - create 26 | - patch 27 | - delete 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - configmaps 32 | - secrets 33 | - serviceaccounts 34 | - services 35 | verbs: 36 | - "*" 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - deployments 41 | - statefulsets 42 | verbs: 43 | - "*" 44 | - apiGroups: 45 | - apiextensions.k8s.io 46 | resources: 47 | - customresourcedefinitions 48 | verbs: 49 | - get 50 | - list 51 | - watch 52 | - apiGroups: 53 | - networking.k8s.io 54 | resources: 55 | - ingresses 56 | - networkpolicies 57 | verbs: 58 | - "*" 59 | - apiGroups: 60 | - batch 61 | resources: 62 | - jobs 63 | verbs: 64 | - get 65 | - list 66 | - watch 67 | - create 68 | - update 69 | - patch 70 | - delete 71 | - deletecollection 72 | - apiGroups: 73 | - rbac.authorization.k8s.io 74 | resources: 75 | - roles 76 | - rolebindings 77 | - clusterroles 78 | - clusterrolebindings 79 | verbs: 80 | - get 81 | - list 82 | - create 83 | - bind 84 | - apiGroups: 85 | - argoproj.io 86 | resources: 87 | - applications 88 | verbs: 89 | - get 90 | - list 91 | - watch 92 | - create 93 | - update 94 | - patch 95 | - delete 96 | - deletecollection 97 | - apiGroups: 98 | - tekton.dev 99 | resources: 100 | - pipelineruns 101 | - pipelines 102 | - tasks 103 | verbs: 104 | - "*" 105 | - apiGroups: 106 | - pipelinesascode.tekton.dev 
107 | resources: 108 | - repositories 109 | verbs: 110 | - "*" 111 | - nonResourceURLs: 112 | - "*" 113 | verbs: 114 | - get 115 | - list 116 | - watch 117 | - create 118 | - update 119 | - patch 120 | - delete 121 | - deletecollection 122 | - apiGroups: 123 | - "operators.coreos.com" 124 | resources: 125 | - subscriptions 126 | verbs: 127 | - get 128 | - list 129 | - watch 130 | -------------------------------------------------------------------------------- /operator/gitops/compute/pipeline-service-manager/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | # This should be moved to the gitops folder and be synced by gitops 2 | # to guarantee that there's no configuration drift between clusters. 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRoleBinding 6 | metadata: 7 | name: "pipeline-service-manager-binding" 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: pipeline-service-admin 12 | subjects: 13 | - kind: ServiceAccount 14 | name: pipeline-service-manager 15 | namespace: pipeline-service 16 | -------------------------------------------------------------------------------- /operator/gitops/sre/credentials/kubeconfig/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift-pipelines/pipeline-service/5b34974a82703dfba477bbec73b0658d49ddc69b/operator/gitops/sre/credentials/kubeconfig/.gitignore -------------------------------------------------------------------------------- /operator/gitops/sre/credentials/kubeconfig/README.md: -------------------------------------------------------------------------------- 1 | # Clusters credentials 2 | 3 | This directory and subdirectories' purpose is to contain the kubeconfig files used for registering the clusters to Argo CD. 4 | 5 | --- 6 | **_NOTES:_** 7 | 8 | The information contained in kubeconfig files is confidential. 
Measures should be taken to protect it from being disclosed. This directory and sub-directories should not contain these files in a public repository. Don't forget to amend the `.gitignore` file if you want to add other files to a private fork of this repository. 9 | 10 | --- 11 | -------------------------------------------------------------------------------- /operator/gitops/sre/credentials/manifests/README.md: -------------------------------------------------------------------------------- 1 | # Confidential manifests 2 | 3 | This directory and subdirectories' purpose is to contain the manifests with confidential information required to setup the service. 4 | 5 | One such example is the `signing-secrets` secret required by tekton-chains during signing and which must be shared across all clusters. 6 | 7 | --- 8 | **_NOTES:_** 9 | 10 | The information contained in the manifests files is confidential. Measures should be taken to protect it from being disclosed. This directory and sub-directories should not contain these files in a public repository. Don't forget to amend the `.gitignore` file if you want to add other files to a private fork of this repository. 11 | 12 | --- 13 | -------------------------------------------------------------------------------- /operator/gitops/sre/credentials/secrets/README.md: -------------------------------------------------------------------------------- 1 | # Secrets from secret managers 2 | 3 | This directory contains files that store secrets from secret managers like Bitwarden, Vault, AWS Secret manager etc. 4 | An example of such a secret could be credentials (username and password) to connect to a remote database such as AWS RDS. 
5 | 6 | The contents of the file under secrets directory should follow the below structure: 7 | ``` 8 | credentials: 9 | - id: 10 | path: 11 | - id: 12 | path: 13 | ``` 14 | 15 | ### Bitwarden Example 16 | 17 | Create a new file named `bitwarden.yaml` under secrets directory and provide a list of id & path values for each secret. 18 | ``` 19 | credentials: 20 | # tekton chains signing secrets 21 | - id: 1234abcd-abcd-1234-abcd-1234abcd1234 22 | path: credentials/manifests/compute/tekton-chains/signing-secrets.yaml 23 | # tekton results secrets 24 | - id: 1234abcd-abcd-1234-abcd-1234abcd1234 25 | path: credentials/manifests/compute/tekton-results/tekton-results-secret.yaml 26 | ``` 27 | 28 | - At the moment, only Bitwarden is supported. Please raise an issue/PR for the support of any other secret manager tools. 29 | - The secret stored in the secret manager tools must be the content of the file you're trying to protect in base64 encoded form. 30 | - We then parse the file, fetch the secret from Bitwarden based on the value of the ID and replace that secret at the value of path. 31 | 32 | Please check [sre examples](operator/docs/sre/examples) directory for more details. 
33 | -------------------------------------------------------------------------------- /operator/gitops/sre/environment/compute/.gitignore: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openshift-pipelines/pipeline-service/5b34974a82703dfba477bbec73b0658d49ddc69b/operator/gitops/sre/environment/compute/.gitignore -------------------------------------------------------------------------------- /operator/test/manifests/setup/pipeline-service/appstudio-pipeline-service-account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: appstudio-pipeline 6 | namespace: plnsvc-tests 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRole 10 | metadata: 11 | name: appstudio-pipelines-runner-clusterrole 12 | rules: 13 | - apiGroups: 14 | - tekton.dev 15 | resources: 16 | - pipelineruns 17 | verbs: 18 | - get 19 | - list 20 | - update 21 | - patch 22 | - apiGroups: 23 | - tekton.dev 24 | resources: 25 | - taskruns 26 | verbs: 27 | - get 28 | - patch 29 | - apiGroups: 30 | - tekton.dev 31 | resources: 32 | - taskruns/status 33 | verbs: 34 | - patch 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - secrets 39 | verbs: 40 | - get 41 | - create 42 | - patch 43 | - delete 44 | --- 45 | apiVersion: rbac.authorization.k8s.io/v1 46 | kind: RoleBinding 47 | metadata: 48 | name: appstudio-pipelines-runner-rolebinding 49 | namespace: plnsvc-tests 50 | roleRef: 51 | apiGroup: rbac.authorization.k8s.io 52 | kind: ClusterRole 53 | name: appstudio-pipelines-runner-clusterrole 54 | subjects: 55 | - kind: ServiceAccount 56 | name: appstudio-pipeline 57 | namespace: plnsvc-tests 58 | -------------------------------------------------------------------------------- /operator/test/manifests/setup/pipeline-service/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - appstudio-pipeline-service-account.yaml 6 | - namespace.yaml 7 | -------------------------------------------------------------------------------- /operator/test/manifests/setup/pipeline-service/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: plnsvc-tests 6 | -------------------------------------------------------------------------------- /operator/test/manifests/test/metrics/curl-metrics-service-pipeline.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PipelineRun 3 | apiVersion: tekton.dev/v1 4 | metadata: 5 | generateName: metrics-curl-test- 6 | spec: 7 | pipelineSpec: 8 | tasks: 9 | - name: metrics-curl 10 | taskSpec: 11 | steps: 12 | - image: registry.access.redhat.com/ubi8/ubi-minimal:8.7 13 | name: metrics-curl 14 | securityContext: 15 | runAsNonRoot: true 16 | script: | 17 | stats="$(curl http://pipeline-metrics-exporter-service.openshift-pipelines.svc.cluster.local:9117/metrics 2> /dev/null | grep pipelinerun_duration)" 18 | if [ -z "$stats" ]; then 19 | echo "FAILED: did not get pipelinerun_duration stats" 20 | echo $stats 21 | exit 1 22 | fi 23 | echo "SUCCESS: got pipelinerun_duration stats:" 24 | echo $stats 25 | taskRunTemplate: 26 | serviceAccountName: appstudio-pipeline 27 | -------------------------------------------------------------------------------- /operator/test/manifests/test/tekton-chains/chains-test-service-account.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: chains-test 6 | namespace: plnsvc-tests 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: RoleBinding 
10 | metadata: 11 | name: chains-test-edit-rolebinding 12 | namespace: plnsvc-tests 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: edit 17 | subjects: 18 | - kind: ServiceAccount 19 | name: chains-test 20 | namespace: plnsvc-tests 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: RoleBinding 24 | metadata: 25 | name: chains-test-scc-rolebinding 26 | namespace: plnsvc-tests 27 | roleRef: 28 | apiGroup: rbac.authorization.k8s.io 29 | kind: ClusterRole 30 | name: pipelines-scc-clusterrole 31 | subjects: 32 | - kind: ServiceAccount 33 | name: chains-test 34 | namespace: plnsvc-tests 35 | -------------------------------------------------------------------------------- /operator/test/manifests/test/tekton-chains/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - chains-test-service-account.yaml 6 | - simple-copy-pipeline.yaml 7 | -------------------------------------------------------------------------------- /operator/test/manifests/test/tekton-chains/public-key.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: PipelineRun 4 | metadata: 5 | generateName: tekton-chains-key- 6 | spec: 7 | pipelineSpec: 8 | tasks: 9 | - name: tekton-chains-key 10 | taskSpec: 11 | description: >- 12 | Test tekton-chains public key access. 13 | steps: 14 | - name: cat 15 | image: quay.io/openshift/origin-cli:latest 16 | script: | 17 | set -o errexit 18 | set -o nounset 19 | set -o pipefail 20 | set -x 21 | PUBLIC_KEY=$(oc get secret public-key -n openshift-pipelines -o jsonpath='{.data.cosign\.pub}') 22 | if [[ -z "$PUBLIC_KEY" ]]; then 23 | echo "[ERROR] Public key is empty." 24 | exit 1 25 | fi 26 | echo "Public key exists." 
27 | echo "$PUBLIC_KEY" | base64 -d 28 | taskRunTemplate: 29 | serviceAccountName: chains-test 30 | -------------------------------------------------------------------------------- /operator/test/manifests/test/tekton-chains/simple-copy-pipeline.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: tekton.dev/v1 3 | kind: Pipeline 4 | metadata: 5 | annotations: 6 | name: simple-copy 7 | namespace: plnsvc-tests 8 | spec: 9 | params: 10 | - description: Reference of the image the pipeline will copy from. 11 | name: image-src 12 | type: string 13 | - description: Reference of the image the pipeline will copy to. 14 | name: image-dst 15 | type: string 16 | results: 17 | - description: Reference of the image the pipeline will produce. 18 | name: IMAGE_URL 19 | value: $(tasks.image-copy.results.IMAGE_URL) 20 | - description: Digest of the image the pipeline will produce. 21 | name: IMAGE_DIGEST 22 | value: $(tasks.image-copy.results.IMAGE_DIGEST) 23 | tasks: 24 | - name: image-copy 25 | taskSpec: 26 | description: >- 27 | Task to copy a container image from a container repository to another. 28 | params: 29 | - name: IMAGE_SRC 30 | description: Reference of the image skopeo will pull. 31 | - name: IMAGE_DST 32 | description: Reference of the image skopeo will push. 33 | - name: SKOPEO_IMAGE 34 | description: The location of the skopeo image. 35 | default: quay.io/skopeo/stable:v1.13 36 | - name: srcTLSverify 37 | description: Verify the TLS on the src registry endpoint 38 | type: string 39 | default: "true" 40 | - name: destTLSverify 41 | description: Verify the TLS on the dest registry endpoint 42 | type: string 43 | default: "true" 44 | results: 45 | - name: IMAGE_DIGEST 46 | description: Digest of the image just built. 47 | - name: IMAGE_URL 48 | description: Reference of the image the pipeline will produce. 
49 | steps: 50 | - name: copy 51 | env: 52 | - name: HOME 53 | value: /tekton/home 54 | image: $(params.SKOPEO_IMAGE) 55 | script: | 56 | set -o errexit 57 | set -o pipefail 58 | echo "Bypass image push temporarily" 59 | echo "foobar" > "$(results.IMAGE_URL.path)" 60 | echo "foobar" > "$(results.IMAGE_DIGEST.path)" 61 | # if [ "$(params.IMAGE_SRC)" != "" ] && [ "$(params.IMAGE_DST)" != "" ] ; then 62 | # skopeo copy \ 63 | # docker://"$(params.IMAGE_SRC)" docker://"$(params.IMAGE_DST)" \ 64 | # --digestfile /tmp/image-digest \ 65 | # --src-tls-verify="$(params.srcTLSverify)" \ 66 | # --dest-tls-verify="$(params.destTLSverify)" 67 | # echo "$(params.IMAGE_DST)" > "$(results.IMAGE_URL.path)" 68 | # cat "/tmp/image-digest" > "$(results.IMAGE_DIGEST.path)" 69 | # else 70 | # return 1 71 | # fi 72 | securityContext: 73 | runAsNonRoot: true 74 | params: 75 | - name: IMAGE_SRC 76 | value: $(params.image-src) 77 | - name: IMAGE_DST 78 | value: $(params.image-dst) 79 | -------------------------------------------------------------------------------- /operator/test/manifests/test/tekton-chains/tekton-chains-metrics.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PipelineRun 3 | apiVersion: tekton.dev/v1 4 | metadata: 5 | generateName: tekton-chains-metrics- 6 | spec: 7 | pipelineSpec: 8 | tasks: 9 | - name: chains-metrics-curl 10 | taskSpec: 11 | steps: 12 | - image: registry.access.redhat.com/ubi8/ubi-minimal:8.7 13 | name: chains-metrics-curl 14 | securityContext: 15 | runAsNonRoot: true 16 | script: | 17 | stats="$(curl http://tekton-chains.openshift-pipelines.svc.cluster.local:9090/metrics 2> /dev/null | grep 'github.com.tektoncd.chains')" 18 | if [ -z "$stats" ]; then 19 | echo "FAILED: tekton chains metrics is not available/working" 20 | exit 1 21 | fi 22 | echo "SUCCESS: tekton chains metrics is available/working" 23 | taskRunTemplate: 24 | serviceAccountName: chains-test 25 | 
-------------------------------------------------------------------------------- /shared/config/dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # To prevent a version to be upgraded by 'update_binaries.sh', 3 | # add '# Freeze' after the export. 4 | # E.g.: 5 | # export FOOBAR_VERSION="1.2.3" # Freeze 6 | export ARGOCD_VERSION="v2.11.3" 7 | export BITWARDEN_VERSION="v2024.6.0" 8 | export CHECKOV_VERSION="3.2.140" 9 | export GO_VERSION="1.22.4" 10 | export HADOLINT_VERSION="v2.12.0" 11 | export JQ_VERSION="1.7.1" 12 | export KUBECTL_VERSION="v1.30.2" 13 | export OC_VERSION="4.15.17" 14 | export ROSA_VERSION="1.2.40" 15 | export SHELLCHECK_VERSION="v0.10.0" 16 | export TEKTONCD_CLI_VERSION="v0.37.0" 17 | export TERRAFORM_VERSION="1.8.5" 18 | export YAMLLINT_VERSION="1.35.1" 19 | export YQ_VERSION="v4.44.2" 20 | --------------------------------------------------------------------------------