├── api └── .gitkeep ├── deploy ├── crds │ └── .gitkeep ├── 03_role_binding.yaml ├── 04_operator.yaml └── 01_role.yaml ├── boilerplate ├── _data │ ├── backing-image-tag │ └── last-boilerplate-commit ├── openshift │ ├── golang-osd-operator │ │ ├── py-requirements.txt │ │ ├── .ci-operator.yaml │ │ ├── fips.go.tmplt │ │ ├── configure-fips.sh │ │ ├── dependabot.yml │ │ ├── .codecov.yml │ │ ├── rvmo-bundle.sh │ │ ├── migrate_build_pipeline.py │ │ ├── golangci.yml │ │ ├── Dockerfile.olm-registry │ │ ├── validate-yaml.py │ │ ├── csv-generate │ │ │ ├── common.sh │ │ │ ├── catalog-build.sh │ │ │ ├── csv-generate.mk │ │ │ └── catalog-publish.sh │ │ ├── project.mk │ │ ├── OWNERS_ALIASES │ │ ├── codecov.sh │ │ ├── app-sre-build-deploy.sh │ │ ├── ensure.sh │ │ ├── prow-config │ │ ├── app-sre.md │ │ ├── update │ │ └── README.md │ ├── golang-osd-e2e │ │ ├── OWNERS │ │ ├── project.mk │ │ ├── README.md │ │ ├── e2e-template.yml │ │ ├── standard.mk │ │ └── update │ └── golang-osd-operator-osde2e │ │ ├── project.mk │ │ ├── e2e-image-build-push.sh │ │ ├── test-harness-template.yml │ │ ├── update │ │ ├── standard.mk │ │ └── README.md ├── update.cfg ├── _lib │ ├── boilerplate.mk │ ├── subscriber │ ├── subscriber-propose │ ├── subscriber-report │ ├── subscriber-report-onboarding │ ├── subscriber-report-pr │ ├── container-make │ ├── freeze-check │ ├── subscriber-propose-update │ ├── subscriber-report-release │ ├── release.sh │ ├── subscriber.sh │ ├── boilerplate-commit │ └── common.sh └── generated-includes.mk ├── .tekton ├── OWNERS ├── e2e-master-push.yaml ├── configure-alertmanager-operator-master-push.yaml ├── e2e-master-pull-request.yaml └── configure-alertmanager-operator-master-pull-request.yaml ├── .ci-operator.yaml ├── .dockerignore ├── manifests ├── 02_service_account.yaml └── 05_prometheusrules.yaml ├── .github ├── renovate.json └── dependabot.yml ├── config ├── manifests │ ├── kustomization.yaml │ └── bases │ │ └── configure-alertmanager-operator.clusterserviceversion.yaml ├── 
metadata │ └── additional-labels.txt ├── templates │ └── csv-template.yaml └── config.go ├── hack ├── app_sre_build_deploy.sh ├── boilerplate.go.txt └── codecov.sh ├── PROJECT ├── fips.go ├── OWNERS ├── test └── e2e │ ├── Dockerfile │ ├── Dockerfile.konflux │ ├── README.md │ ├── configure_alertmanager_operator_runner_test.go │ ├── e2e-template.yml │ └── configure_alertmanager_operator_tests.go ├── .codecov.yml ├── .gitattributes ├── Makefile ├── .gitignore ├── OWNERS_ALIASES ├── pkg ├── metrics │ ├── service.go │ └── metrics.go ├── readiness │ ├── zz_generated_mocks.go │ └── cluster_ready.go └── types │ └── alertmanagerconfig.go ├── functions.mk ├── go.mod └── main.go /api/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /deploy/crds/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /boilerplate/_data/backing-image-tag: -------------------------------------------------------------------------------- 1 | image-v8.3.1 2 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/py-requirements.txt: -------------------------------------------------------------------------------- 1 | pyyaml>=5.3.1 2 | -------------------------------------------------------------------------------- /.tekton/OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - srep-infra-cicd 3 | approvers: 4 | - srep-infra-cicd 5 | -------------------------------------------------------------------------------- /boilerplate/_data/last-boilerplate-commit: -------------------------------------------------------------------------------- 1 | 85881913332596625626c78ade05b4ce7d04fcda 2 | 
-------------------------------------------------------------------------------- /boilerplate/update.cfg: -------------------------------------------------------------------------------- 1 | openshift/golang-osd-operator 2 | openshift/golang-osd-e2e 3 | -------------------------------------------------------------------------------- /.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | name: boilerplate 3 | namespace: openshift 4 | tag: image-v8.3.1 5 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - srep-infra-cicd 3 | approvers: 4 | - srep-infra-cicd 5 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | name: __NAME__ 3 | namespace: __NAMESPACE__ 4 | tag: __TAG__ 5 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 
3 | bin/ 4 | testbin/ 5 | -------------------------------------------------------------------------------- /manifests/02_service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: configure-alertmanager-operator 5 | namespace: openshift-monitoring 6 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>openshift/boilerplate//.github/renovate.json" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - bases/configure-alertmanager-operator.clusterserviceversion.yaml 5 | -------------------------------------------------------------------------------- /boilerplate/_lib/boilerplate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: boilerplate-commit 2 | boilerplate-commit: 3 | @boilerplate/_lib/boilerplate-commit 4 | 5 | .PHONY: boilerplate-freeze-check 6 | boilerplate-freeze-check: 7 | @boilerplate/_lib/freeze-check 8 | -------------------------------------------------------------------------------- /hack/app_sre_build_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd $(dirname $0)/.. 
6 | 7 | if [[ -z $IMAGE_REPOSITORY ]]; then 8 | IMAGE_REPOSITORY=app-sre 9 | fi 10 | 11 | # Build the image 12 | 13 | make IMAGE_REPOSITORY=$IMAGE_REPOSITORY docker-build skopeo-push build-catalog-image 14 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: managed.openshift.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | plugins: 5 | manifests.sdk.operatorframework.io/v2: {} 6 | scorecard.sdk.operatorframework.io/v2: {} 7 | projectName: configure-alertmanager-operator 8 | repo: github.com/openshift/configure-alertmanager-operator 9 | version: "3" 10 | -------------------------------------------------------------------------------- /fips.go: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 | // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | 4 | E2E_IMAGE_REGISTRY?=quay.io 5 | E2E_IMAGE_REPOSITORY?=app-sre 6 | E2E_IMAGE_NAME?=$(OPERATOR_NAME)-e2e 7 | 8 | 9 | REGISTRY_USER?=$(QUAY_USER) 10 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 11 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source 
$REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | [propose]='Propose pull/merge requests for subscribers' 9 | [report]='Print information about subscribers' 10 | ) 11 | 12 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 13 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator-osde2e/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | 4 | HARNESS_IMAGE_REGISTRY?=quay.io 5 | HARNESS_IMAGE_REPOSITORY?=app-sre 6 | HARNESS_IMAGE_NAME?=$(OPERATOR_NAME)-test-harness 7 | 8 | 9 | REGISTRY_USER?=$(QUAY_USER) 10 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 11 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/fips.go.tmplt: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 | // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /config/metadata/additional-labels.txt: -------------------------------------------------------------------------------- 1 | LABEL com.redhat.component="openshift-configure-alertmanager-operator" io.k8s.description="..." description="..." distribution-scope="public" name="openshift/configure-alertmanager-operator" url="https://github.com/openshift/configure-alertmanager-operator" vendor="Red Hat, Inc." 
release="v0.0.0" version="v0.0.0" 2 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - srep-functional-leads 3 | - srep-team-leads 4 | - srep-functional-team-rocket 5 | - clcollins 6 | - robotmaxtron 7 | - typeid 8 | - xiaoyu74 9 | - zmird-r 10 | approvers: 11 | - srep-functional-leads 12 | - srep-team-leads 13 | - srep-functional-team-rocket 14 | - clcollins 15 | - robotmaxtron 16 | - typeid 17 | - xiaoyu74 18 | - zmird-r 19 | maintainers: 20 | - dustman9000 21 | - wanghaoran1988 22 | - clcollins 23 | -------------------------------------------------------------------------------- /test/e2e/Dockerfile: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_9_1.24 as builder 3 | WORKDIR /go/src/github.com/openshift/configure-alertmanager-operator/ 4 | COPY . . 
5 | RUN CGO_ENABLED=0 GOFLAGS="-mod=mod" go test ./test/e2e -v -c --tags=osde2e -o /e2e.test 6 | 7 | FROM registry.access.redhat.com/ubi8/ubi-minimal:latest 8 | COPY --from=builder ./e2e.test e2e.test 9 | ENTRYPOINT [ "/e2e.test" ] 10 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-propose: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | # TODO: 9 | # [bootstrap]='Bootstrap a new subscriber' 10 | # [prow-config]='Propose standardized prow configuration to openshift/release' 11 | [update]='Update an already-onboarded subscriber' 12 | ) 13 | 14 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 15 | -------------------------------------------------------------------------------- /test/e2e/Dockerfile.konflux: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | FROM brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.22 as builder 3 | WORKDIR /go/src/github.com/openshift/configure-alertmanager-operator/ 4 | COPY . . 
5 | RUN CGO_ENABLED=0 GOFLAGS="-mod=mod" go test ./test/e2e -v -c --tags=osde2e -o /harness.test 6 | 7 | FROM registry.access.redhat.com/ubi8/ubi-minimal:latest 8 | COPY --from=builder ./harness.test harness.test 9 | ENTRYPOINT [ "/harness.test" ] 10 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-report: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | [onboarding]='Prints a CSV report of onboarded boilerplate subscribers.' 9 | [pr]='Finds boilerplate-related pull requests for registered subscribers.' 10 | [release]='Checks openshift/release configuration for onboarded subscribers.' 11 | ) 12 | 13 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 14 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/configure-fips.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | REPO_ROOT=$(git rev-parse --show-toplevel) 6 | CONVENTION_DIR="$REPO_ROOT/boilerplate/openshift/golang-osd-operator" 7 | PRE_V1_SDK_MANAGER_DIR="$REPO_ROOT/cmd/manager" 8 | 9 | if [[ -d "$PRE_V1_SDK_MANAGER_DIR" ]] 10 | then 11 | MAIN_DIR=$PRE_V1_SDK_MANAGER_DIR 12 | else 13 | MAIN_DIR=$REPO_ROOT 14 | fi 15 | 16 | echo "Writing fips file at $MAIN_DIR/fips.go" 17 | 18 | cp $CONVENTION_DIR/fips.go.tmplt "$MAIN_DIR/fips.go" 19 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "docker" 4 | directory: "/build" 5 | labels: 6 | - "area/dependency" 7 | - "ok-to-test" 8 
| schedule: 9 | interval: "weekly" 10 | ignore: 11 | - dependency-name: "redhat-services-prod/openshift/boilerplate" 12 | # don't upgrade boilerplate via these means 13 | - dependency-name: "openshift4/ose-operator-registry" 14 | # don't upgrade ose-operator-registry via these means 15 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | require_ci_to_pass: no 4 | 5 | coverage: 6 | precision: 2 7 | round: down 8 | range: "20...100" 9 | 10 | status: 11 | project: no 12 | patch: no 13 | changes: no 14 | 15 | parsers: 16 | gcov: 17 | branch_detection: 18 | conditional: yes 19 | loop: yes 20 | method: no 21 | macro: no 22 | 23 | comment: 24 | layout: "reach,diff,flags,tree" 25 | behavior: default 26 | require_changes: no 27 | 28 | ignore: 29 | - "**/mocks" 30 | - "**/zz_generated*.go" 31 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # BEGIN boilerplate-managed 2 | version: 2 3 | updates: 4 | - package-ecosystem: "docker" 5 | directory: "/build" 6 | labels: 7 | - "area/dependency" 8 | - "ok-to-test" 9 | schedule: 10 | interval: "weekly" 11 | ignore: 12 | - dependency-name: "redhat-services-prod/openshift/boilerplate" 13 | # don't upgrade boilerplate via these means 14 | - dependency-name: "openshift4/ose-operator-registry" 15 | # don't upgrade ose-operator-registry via these means 16 | # END boilerplate-managed 17 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | require_ci_to_pass: no 4 | 5 | coverage: 6 | precision: 2 7 | round: down 8 | range: "20...100" 9 | 10 | status: 11 
| project: no 12 | patch: no 13 | changes: no 14 | 15 | parsers: 16 | gcov: 17 | branch_detection: 18 | conditional: yes 19 | loop: yes 20 | method: no 21 | macro: no 22 | 23 | comment: 24 | layout: "reach,diff,flags,tree" 25 | behavior: default 26 | require_changes: no 27 | 28 | ignore: 29 | - "**/mocks" 30 | - "**/zz_generated*.go" 31 | -------------------------------------------------------------------------------- /boilerplate/generated-includes.mk: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | # This file automatically includes any *.mk files in your subscribed 3 | # conventions. Please ensure your base Makefile includes only this file. 4 | include boilerplate/_lib/boilerplate.mk 5 | include boilerplate/openshift/golang-osd-operator/csv-generate/csv-generate.mk 6 | include boilerplate/openshift/golang-osd-operator/project.mk 7 | include boilerplate/openshift/golang-osd-operator/standard.mk 8 | include boilerplate/openshift/golang-osd-e2e/project.mk 9 | include boilerplate/openshift/golang-osd-e2e/standard.mk 10 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | ### BEGIN BOILERPLATE GENERATED -- DO NOT EDIT ### 2 | ### This block must be the last thing in your ### 3 | ### .gitattributes file; otherwise the 'validate' ### 4 | ### CI check will fail. ### 5 | # Used to ensure nobody mucked with boilerplate files. 6 | boilerplate/_lib/freeze-check linguist-generated=false 7 | # Show the boilerplate commit hash update. It's only one line anyway. 8 | boilerplate/_data/last-boilerplate-commit linguist-generated=false 9 | # Used by freeze-check. Good place for attackers to inject badness. 10 | boilerplate/update linguist-generated=false 11 | # Make sure attackers can't hide changes to this configuration 12 | .gitattributes linguist-generated=false 13 | ### END BOILERPLATE GENERATED ### 14 | -------------------------------------------------------------------------------- /test/e2e/README.md: -------------------------------------------------------------------------------- 1 | ## Locally running e2e test suite 2 | When updating your operator it's beneficial to add e2e tests for new functionality AND ensure existing functionality is not breaking using e2e tests. 3 | To do this, following steps are recommended 4 | 5 | 1. Run "make e2e-binary-build" to make sure e2e tests build 6 | 2. Deploy your new version of operator in a test cluster 7 | 3. Run "go install github.com/onsi/ginkgo/ginkgo@latest" 8 | 4. Get kubeadmin credentials from your cluster using 9 | 10 | ocm get /api/clusters_mgmt/v1/clusters/(cluster-id)/credentials | jq -r .kubeconfig > /(path-to)/kubeconfig 11 | 12 | 5. 
Run test suite using 13 | 14 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 15 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/rvmo-bundle.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | REPOSITORY=${REPOSITORY:-"https://github.com/openshift/managed-release-bundle-osd.git"} 6 | CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD|egrep '^main$|^release-[0-9]+\.[0-9]+$'|cat) 7 | RVMO_BRANCH=${CURRENT_BRANCH:-main} 8 | # You can override any branch detection by setting RELEASE_BRANCH 9 | BRANCH=${RELEASE_BRANCH:-$RVMO_BRANCH} 10 | DELETE_TEMP_DIR=${DELETE_TEMP_DIR:-true} 11 | TMPD=$(mktemp -d -t rvmo-bundle.XXXXXX) 12 | [[ "${DELETE_TEMP_DIR}" == "true" ]] && trap 'rm -rf ${TMPD}' EXIT 13 | 14 | cd "${TMPD}" 15 | echo "Cloning RVMO from ${REPOSITORY}:${BRANCH}" 16 | git clone --single-branch -b "${BRANCH}" "${REPOSITORY}" . 
17 | bash hack/update-operator-release.sh 18 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/migrate_build_pipeline.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import sys 3 | 4 | file_path = sys.argv[1] 5 | with open(file_path, 'r') as f: 6 | data = yaml.safe_load(f) 7 | 8 | spec = data.get('spec', {}) 9 | 10 | # Remove pipelineSpec and taskRunSpecs 11 | spec.pop('pipelineSpec', None) 12 | spec.pop('taskRunSpecs', None) 13 | 14 | # Add pipelineRef 15 | spec['pipelineRef'] = { 16 | 'resolver': 'git', 17 | 'params': [ 18 | {'name': 'url', 'value': 'https://github.com/openshift/boilerplate'}, 19 | {'name': 'revision', 'value': 'master'}, 20 | {'name': 'pathInRepo', 'value': 'pipelines/docker-build-oci-ta/pipeline.yaml'} 21 | ] 22 | } 23 | 24 | # Write back 25 | with open(file_path, 'w') as f: 26 | yaml.dump(data, f, default_flow_style=False, sort_keys=False) 27 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | run: 3 | concurrency: 10 4 | linters: 5 | default: none 6 | enable: 7 | - errcheck 8 | - gosec 9 | - govet 10 | - ineffassign 11 | - misspell 12 | - staticcheck 13 | - unused 14 | settings: 15 | misspell: 16 | extra-words: 17 | - typo: openshit 18 | correction: OpenShift 19 | exclusions: 20 | generated: lax 21 | presets: 22 | - comments 23 | - common-false-positives 24 | - legacy 25 | - std-error-handling 26 | paths: 27 | - third_party/ 28 | - builtin/ 29 | - examples/ 30 | issues: 31 | max-issues-per-linter: 0 32 | max-same-issues: 0 33 | formatters: 34 | exclusions: 35 | generated: lax 36 | paths: 37 | - third_party/ 38 | - builtin/ 39 | - examples/ 40 | -------------------------------------------------------------------------------- 
/test/e2e/configure_alertmanager_operator_runner_test.go: -------------------------------------------------------------------------------- 1 | // THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | //go:build osde2e 3 | 4 | package osde2etests 5 | 6 | import ( 7 | "os" 8 | "path/filepath" 9 | "testing" 10 | 11 | . "github.com/onsi/ginkgo/v2" 12 | . "github.com/onsi/gomega" 13 | ) 14 | 15 | const ( 16 | testResultsDirectory = "/test-run-results" 17 | jUnitOutputFilename = "junit-configure-alertmanager-operator.xml" 18 | ) 19 | 20 | // Test entrypoint. osde2e runs this as a test suite on test pod. 21 | func TestConfigureAlertmanagerOperator(t *testing.T) { 22 | RegisterFailHandler(Fail) 23 | suiteConfig, reporterConfig := GinkgoConfiguration() 24 | if _, ok := os.LookupEnv("DISABLE_JUNIT_REPORT"); !ok { 25 | reporterConfig.JUnitReport = filepath.Join(testResultsDirectory, jUnitOutputFilename) 26 | } 27 | RunSpecs(t, "Configure Alertmanager Operator", suiteConfig, reporterConfig) 28 | } 29 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/Dockerfile.olm-registry: -------------------------------------------------------------------------------- 1 | FROM registry.redhat.io/openshift4/ose-operator-registry-rhel9:v4.19 AS builder 2 | ARG SAAS_OPERATOR_DIR 3 | COPY ${SAAS_OPERATOR_DIR} manifests 4 | RUN initializer --permissive 5 | 6 | # ubi-micro does not work for clusters with fips enabled unless we make OpenSSL available 7 | FROM registry.access.redhat.com/ubi9/ubi-minimal:latest 8 | 9 | COPY --from=builder /bin/registry-server /bin/registry-server 10 | COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe 11 | COPY --from=builder /bin/initializer /bin/initializer 12 | 13 | WORKDIR /registry 14 | RUN chgrp -R 0 /registry && chmod -R g+rwx /registry 15 | 16 | USER 1001 17 | 18 | COPY --from=builder /registry /registry 19 | 20 | EXPOSE 50051 21 | 22 | CMD ["registry-server", "-t", 
"/tmp/terminate.log"] 23 | 24 | # Set the DC specific label for the location of the DC database in the image 25 | LABEL operators.operatorframework.io.index.database.v1=/registry/bundles.db 26 | -------------------------------------------------------------------------------- /deploy/03_role_binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: RoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: configure-alertmanager-operator 6 | namespace: openshift-monitoring 7 | subjects: 8 | - kind: ServiceAccount 9 | name: configure-alertmanager-operator 10 | roleRef: 11 | kind: Role 12 | name: configure-alertmanager-operator 13 | apiGroup: rbac.authorization.k8s.io 14 | --- 15 | apiVersion: authorization.openshift.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: configure-alertmanager-operator-edit 19 | namespace: openshift-monitoring 20 | roleRef: 21 | kind: ClusterRole 22 | apiGroup: rbac.authorization.k8s.io 23 | name: configure-alertmanager-operator-edit 24 | subjects: 25 | - kind: ServiceAccount 26 | name: configure-alertmanager-operator 27 | namespace: openshift-monitoring 28 | --- 29 | apiVersion: authorization.openshift.io/v1 30 | kind: ClusterRoleBinding 31 | metadata: 32 | name: configure-alertmanager-operator-view 33 | roleRef: 34 | kind: ClusterRole 35 | apiGroup: rbac.authorization.k8s.io 36 | name: configure-alertmanager-operator-view 37 | subjects: 38 | - kind: ServiceAccount 39 | name: configure-alertmanager-operator 40 | namespace: openshift-monitoring 41 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/validate-yaml.py: -------------------------------------------------------------------------------- 1 | # Usage 2 | # python validate-yaml.py path/to/file/or/dir 3 | 4 | import sys 5 | import yaml 6 | from os import listdir 7 | from os.path import isdir, isfile, join, splitext 8 | 9 | 
usage = "Usage: {0:s} path/to/file/or/dir...".format(sys.argv[0]) 10 | 11 | if len(sys.argv) < 2: 12 | print(usage) 13 | sys.exit(0) 14 | 15 | input_paths = sys.argv[1:] 16 | 17 | error = False 18 | 19 | for path in input_paths: 20 | if isfile(path): 21 | files = [path] 22 | elif isdir(path): 23 | files = [join(path, f) for f in listdir(path) if isfile(join(path, f))] 24 | else: 25 | print("Path {0:s} does not exist".format(path)) 26 | error=True 27 | continue 28 | 29 | for file_path in files: 30 | _, ext = splitext(file_path) 31 | if ext not in [".yml", ".yaml"]: 32 | continue 33 | 34 | print("Validating YAML {}".format(file_path)) 35 | with open(file_path, "r") as f: 36 | data = f.read() 37 | try: 38 | for y in yaml.safe_load_all(data): 39 | pass 40 | except Exception as e: 41 | print(e) 42 | error = True 43 | 44 | sys.exit(error) 45 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-report-onboarding: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | usage() { 7 | cat < "${COVER_PROFILE}" 16 | rm -f "${COVER_PROFILE}.tmp" 17 | 18 | # Configure the git refs and job link based on how the job was triggered via prow 19 | if [[ "${JOB_TYPE}" == "presubmit" ]]; then 20 | echo "detected PR code coverage job for #${PULL_NUMBER}" 21 | REF_FLAGS="-P ${PULL_NUMBER} -C ${PULL_PULL_SHA}" 22 | JOB_LINK="${CI_SERVER_URL}/pr-logs/pull/${REPO_OWNER}_${REPO_NAME}/${PULL_NUMBER}/${JOB_NAME}/${BUILD_ID}" 23 | elif [[ "${JOB_TYPE}" == "postsubmit" ]]; then 24 | echo "detected branch code coverage job for ${PULL_BASE_REF}" 25 | REF_FLAGS="-B ${PULL_BASE_REF} -C ${PULL_BASE_SHA}" 26 | JOB_LINK="${CI_SERVER_URL}/logs/${JOB_NAME}/${BUILD_ID}" 27 | elif [[ "${JOB_TYPE}" == "local" ]]; then 28 | echo "coverage report available at ${COVER_PROFILE}" 29 | exit 0 30 | else 
31 | echo "${JOB_TYPE} jobs not supported" 32 | exit 1 33 | fi 34 | 35 | # Configure certain internal codecov variables with values from prow. 36 | export CI_BUILD_URL="${JOB_LINK}" 37 | export CI_BUILD_ID="${JOB_NAME}" 38 | export CI_JOB_ID="${BUILD_ID}" 39 | 40 | bash <(curl -s https://codecov.io/bash) -Z -K -f "${COVER_PROFILE}" -r "${REPO_OWNER}/${REPO_NAME}" ${REF_FLAGS} 41 | -------------------------------------------------------------------------------- /manifests/05_prometheusrules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: sre-configure-alertmanager-operator 5 | namespace: openshift-monitoring 6 | spec: 7 | groups: 8 | - name: sre-configure-alertmanager-operator 9 | rules: 10 | - alert: ConfigureAlertmanagerOperatorMismatchDmsSRE 11 | annotations: 12 | message: "Mismatch between DMS secret and DMS AlertManager config" 13 | link_url: "https://access.redhat.com/articles/4165971" 14 | expr: dms_secret_exists + am_secret_contains_dms == 1 15 | for: 5m 16 | labels: 17 | severity: critical 18 | - alert: ConfigureAlertmanagerOperatorMismatchGaSRE 19 | annotations: 20 | message: "Mismatch between GA secret and GA AlertManager config" 21 | link_url: "https://access.redhat.com/articles/4165971" 22 | expr: ga_secret_exists + am_secret_contains_ga == 1 23 | for: 5m 24 | labels: 25 | severity: critical 26 | - alert: ConfigureAlertmanagerOperatorMismatchPdSRE 27 | annotations: 28 | message: "Mismatch between PD secret and PD AlertManager config" 29 | link_url: "https://access.redhat.com/articles/4165971" 30 | expr: pd_secret_exists + am_secret_contains_pd == 1 31 | for: 5m 32 | labels: 33 | severity: critical 34 | - alert: ConfigureAlertmanagerOperatorMissingAlermanagerConfigSRE 35 | annotations: 36 | message: "Alertmanager config secret does not exist" 37 | link_url: "https://access.redhat.com/articles/4165971" 38 | expr: 
am_secret_exists == 0 39 | for: 5m 40 | labels: 41 | severity: critical 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary Build Files 2 | build/_output 3 | build/_test 4 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 5 | ### Emacs ### 6 | # -*- mode: gitignore; -*- 7 | *~ 8 | \#*\# 9 | /.emacs.desktop 10 | /.emacs.desktop.lock 11 | *.elc 12 | auto-save-list 13 | tramp 14 | .\#* 15 | # Org-mode 16 | .org-id-locations 17 | *_archive 18 | # flymake-mode 19 | *_flymake.* 20 | # eshell files 21 | /eshell/history 22 | /eshell/lastdir 23 | # elpa packages 24 | /elpa/ 25 | # reftex files 26 | *.rel 27 | # AUCTeX auto folder 28 | /auto/ 29 | # cask packages 30 | .cask/ 31 | dist/ 32 | # Flycheck 33 | flycheck_*.el 34 | # server auth directory 35 | /server/ 36 | # projectiles files 37 | .projectile 38 | projectile-bookmarks.eld 39 | # directory configuration 40 | .dir-locals.el 41 | # saveplace 42 | places 43 | # url cache 44 | url/cache/ 45 | # cedet 46 | ede-projects.el 47 | # smex 48 | smex-items 49 | # company-statistics 50 | company-statistics-cache.el 51 | # anaconda-mode 52 | anaconda-mode/ 53 | ### Go ### 54 | # Binaries for programs and plugins 55 | *.exe 56 | *.exe~ 57 | *.dll 58 | *.so 59 | *.dylib 60 | # Test binary, build with 'go test -c' 61 | *.test 62 | # Output of the go coverage tool, specifically when used with LiteIDE 63 | *.out 64 | ### Vim ### 65 | # swap 66 | .sw[a-p] 67 | .*.sw[a-p] 68 | *.orig 69 | # session 70 | Session.vim 71 | # temporary 72 | .netrwhist 73 | # auto-generated tag files 74 | tags 75 | ### VisualStudioCode ### 76 | .vscode/* 77 | .history 78 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 79 | # Boilerplate: used for make op-generate 80 | .operator-sdk/ 81 | .venv/ 82 | # Generated by local build with boilerplate 83 | 
saas-configure-alertmanager-operator-bundle/ 84 | .docker 85 | -------------------------------------------------------------------------------- /deploy/04_operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: configure-alertmanager-operator 5 | namespace: openshift-monitoring 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | name: configure-alertmanager-operator 11 | template: 12 | metadata: 13 | labels: 14 | name: configure-alertmanager-operator 15 | annotations: 16 | openshift.io/required-scc: restricted-v2 17 | spec: 18 | serviceAccountName: configure-alertmanager-operator 19 | affinity: 20 | nodeAffinity: 21 | preferredDuringSchedulingIgnoredDuringExecution: 22 | - preference: 23 | matchExpressions: 24 | - key: node-role.kubernetes.io/infra 25 | operator: Exists 26 | weight: 1 27 | tolerations: 28 | - effect: NoSchedule 29 | key: node-role.kubernetes.io/infra 30 | operator: Exists 31 | containers: 32 | - name: configure-alertmanager-operator 33 | image: quay.io/redhat-services-prod/camo-hcm-tenant/configure-alertmanager-operator-master/configure-alertmanager-operator-master@sha256:06210e55ea90935f8e1ccea48b1b3db2694de6bac4564596d306dd94333933b9 34 | command: 35 | - configure-alertmanager-operator 36 | imagePullPolicy: Always 37 | env: 38 | - name: WATCH_NAMESPACE 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.namespace 42 | - name: POD_NAME 43 | valueFrom: 44 | fieldRef: 45 | fieldPath: metadata.name 46 | - name: OPERATOR_NAME 47 | value: "configure-alertmanager-operator" 48 | - name: FEDRAMP 49 | value: "false" 50 | -------------------------------------------------------------------------------- /config/manifests/bases/configure-alertmanager-operator.clusterserviceversion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: 
ClusterServiceVersion 3 | metadata: 4 | annotations: 5 | alm-examples: '[]' 6 | capabilities: Basic Install 7 | features.operators.openshift.io/cnf: "false" 8 | features.operators.openshift.io/cni: "false" 9 | features.operators.openshift.io/csi: "false" 10 | features.operators.openshift.io/disconnected: "false" 11 | features.operators.openshift.io/fips-compliant: "true" 12 | features.operators.openshift.io/proxy-aware: "false" 13 | features.operators.openshift.io/tls-profiles: "false" 14 | features.operators.openshift.io/token-auth-aws: "false" 15 | features.operators.openshift.io/token-auth-azure: "false" 16 | features.operators.openshift.io/token-auth-gcp: "false" 17 | operators.openshift.io/valid-subscription: '["OpenShift Container Platform"]' 18 | name: configure-alertmanager-operator.v0.0.0 19 | namespace: placeholder 20 | spec: 21 | apiservicedefinitions: {} 22 | customresourcedefinitions: {} 23 | description: Configure Alertmanager Operator description. TODO. 24 | displayName: Configure Alertmanager Operator 25 | icon: 26 | - base64data: "" 27 | mediatype: "" 28 | install: 29 | spec: 30 | deployments: null 31 | strategy: "" 32 | installModes: 33 | - supported: true 34 | type: OwnNamespace 35 | - supported: false 36 | type: SingleNamespace 37 | - supported: true 38 | type: MultiNamespace 39 | - supported: false 40 | type: AllNamespaces 41 | keywords: 42 | - configure-alertmanager-operator 43 | links: 44 | - name: Configure Alertmanager Operator 45 | url: https://configure-alertmanager-operator.domain 46 | maintainers: 47 | - email: your@email.com 48 | name: Maintainer Name 49 | maturity: alpha 50 | provider: 51 | name: Provider Name 52 | url: https://your.domain 53 | version: 0.0.0 54 | -------------------------------------------------------------------------------- /.tekton/e2e-master-push.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 
5 | build.appstudio.openshift.io/repo: https://github.com/openshift/configure-alertmanager-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 8 | pipelinesascode.tekton.dev/max-keep-runs: '25' 9 | pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch 10 | == "master" 11 | creationTimestamp: null 12 | labels: 13 | appstudio.openshift.io/application: configure-alertmanager-operator-master 14 | appstudio.openshift.io/component: e2e-master 15 | pipelines.appstudio.openshift.io/type: build 16 | name: e2e-master-on-push 17 | namespace: camo-hcm-tenant 18 | spec: 19 | params: 20 | - name: git-url 21 | value: '{{source_url}}' 22 | - name: revision 23 | value: '{{revision}}' 24 | - name: output-image 25 | value: quay.io/redhat-user-workloads/camo-hcm-tenant/configure-alertmanager-operator-master/configure-alertmanager-operator-master/e2e:{{revision}} 26 | - name: dockerfile 27 | value: test/e2e/Dockerfile 28 | - name: path-context 29 | value: . 
30 | taskRunTemplate: 31 | serviceAccountName: build-pipeline-e2e-master 32 | workspaces: 33 | - name: workspace 34 | volumeClaimTemplate: 35 | metadata: 36 | creationTimestamp: null 37 | spec: 38 | accessModes: 39 | - ReadWriteOnce 40 | resources: 41 | requests: 42 | storage: 1Gi 43 | status: {} 44 | - name: git-auth 45 | secret: 46 | secretName: '{{ git_auth_secret }}' 47 | pipelineRef: 48 | resolver: git 49 | params: 50 | - name: url 51 | value: https://github.com/openshift/boilerplate 52 | - name: revision 53 | value: master 54 | - name: pathInRepo 55 | value: pipelines/docker-build-oci-ta/pipeline.yaml 56 | status: {} 57 | -------------------------------------------------------------------------------- /.tekton/configure-alertmanager-operator-master-push.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://github.com/openshift/configure-alertmanager-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 8 | pipelinesascode.tekton.dev/max-keep-runs: '25' 9 | pipelinesascode.tekton.dev/on-cel-expression: event == "push" && target_branch 10 | == "master" 11 | creationTimestamp: null 12 | labels: 13 | appstudio.openshift.io/application: configure-alertmanager-operator-master 14 | appstudio.openshift.io/component: configure-alertmanager-operator-master 15 | pipelines.appstudio.openshift.io/type: build 16 | name: configure-alertmanager-operator-master-on-push 17 | namespace: camo-hcm-tenant 18 | spec: 19 | params: 20 | - name: git-url 21 | value: '{{source_url}}' 22 | - name: revision 23 | value: '{{revision}}' 24 | - name: output-image 25 | value: quay.io/redhat-user-workloads/camo-hcm-tenant/configure-alertmanager-operator-master/configure-alertmanager-operator-master:{{revision}} 26 | - name: dockerfile 27 | 
value: build/Dockerfile 28 | - name: path-context 29 | value: . 30 | taskRunTemplate: 31 | serviceAccountName: build-pipeline-configure-alertmanager-operator-master 32 | workspaces: 33 | - name: workspace 34 | volumeClaimTemplate: 35 | metadata: 36 | creationTimestamp: null 37 | spec: 38 | accessModes: 39 | - ReadWriteOnce 40 | resources: 41 | requests: 42 | storage: 1Gi 43 | status: {} 44 | - name: git-auth 45 | secret: 46 | secretName: '{{ git_auth_secret }}' 47 | pipelineRef: 48 | resolver: git 49 | params: 50 | - name: url 51 | value: https://github.com/openshift/boilerplate 52 | - name: revision 53 | value: master 54 | - name: pathInRepo 55 | value: pipelines/docker-build-oci-ta/pipeline.yaml 56 | status: {} 57 | -------------------------------------------------------------------------------- /deploy/01_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: configure-alertmanager-operator 6 | namespace: openshift-monitoring 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - pods 12 | - services 13 | - endpoints 14 | - persistentvolumeclaims 15 | - events 16 | - configmaps 17 | - secrets 18 | verbs: 19 | - "*" 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - namespaces 24 | verbs: 25 | - get 26 | - apiGroups: 27 | - apps 28 | resources: 29 | - deployments 30 | - daemonsets 31 | - replicasets 32 | - statefulsets 33 | verbs: 34 | - "*" 35 | - apiGroups: 36 | - monitoring.coreos.com 37 | resources: 38 | - servicemonitors 39 | verbs: 40 | - "get" 41 | - "create" 42 | - apiGroups: 43 | - apps 44 | resources: 45 | - deployments/finalizers 46 | resourceNames: 47 | - configure-alertmanager-operator 48 | verbs: 49 | - "update" 50 | --- 51 | apiVersion: rbac.authorization.k8s.io/v1 52 | kind: ClusterRole 53 | metadata: 54 | name: configure-alertmanager-operator-view 55 | rules: 56 | - apiGroups: 57 | - "" 58 | resources: 59 | - nodes 
60 | verbs: 61 | - get 62 | - list 63 | - apiGroups: 64 | - config.openshift.io 65 | attributeRestrictions: null 66 | resources: 67 | - clusterversions 68 | verbs: 69 | - get 70 | - list 71 | - watch 72 | - apiGroups: 73 | - config.openshift.io 74 | resources: 75 | - proxies 76 | - infrastructures 77 | verbs: 78 | - get 79 | - list 80 | - watch 81 | --- 82 | apiVersion: rbac.authorization.k8s.io/v1 83 | kind: ClusterRole 84 | metadata: 85 | name: configure-alertmanager-operator-edit 86 | rules: 87 | - apiGroups: 88 | - "" 89 | attributeRestrictions: null 90 | resources: 91 | - secrets 92 | - configmaps 93 | verbs: 94 | - get 95 | - list 96 | - watch 97 | - patch 98 | - update 99 | - apiGroups: 100 | - batch 101 | resources: 102 | - jobs 103 | verbs: 104 | - get 105 | - list 106 | - watch 107 | -------------------------------------------------------------------------------- /.tekton/e2e-master-pull-request.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://github.com/openshift/configure-alertmanager-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' 8 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 9 | pipelinesascode.tekton.dev/max-keep-runs: '25' 10 | pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch 11 | == "master" 12 | creationTimestamp: null 13 | labels: 14 | appstudio.openshift.io/application: configure-alertmanager-operator-master 15 | appstudio.openshift.io/component: e2e-master 16 | pipelines.appstudio.openshift.io/type: build 17 | name: e2e-master-on-pull-request 18 | namespace: camo-hcm-tenant 19 | spec: 20 | params: 21 | - name: git-url 22 | value: '{{source_url}}' 23 | - name: revision 24 | value: '{{revision}}' 25 | - name: 
output-image 26 | value: quay.io/redhat-user-workloads/camo-hcm-tenant/configure-alertmanager-operator-master/configure-alertmanager-operator-master/e2e:on-pr-{{revision}} 27 | - name: image-expires-after 28 | value: 5d 29 | - name: dockerfile 30 | value: test/e2e/Dockerfile 31 | - name: path-context 32 | value: . 33 | taskRunTemplate: 34 | serviceAccountName: build-pipeline-e2e-master 35 | workspaces: 36 | - name: workspace 37 | volumeClaimTemplate: 38 | metadata: 39 | creationTimestamp: null 40 | spec: 41 | accessModes: 42 | - ReadWriteOnce 43 | resources: 44 | requests: 45 | storage: 1Gi 46 | status: {} 47 | - name: git-auth 48 | secret: 49 | secretName: '{{ git_auth_secret }}' 50 | pipelineRef: 51 | resolver: git 52 | params: 53 | - name: url 54 | value: https://github.com/openshift/boilerplate 55 | - name: revision 56 | value: master 57 | - name: pathInRepo 58 | value: pipelines/docker-build-oci-ta/pipeline.yaml 59 | status: {} 60 | -------------------------------------------------------------------------------- /.tekton/configure-alertmanager-operator-master-pull-request.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: tekton.dev/v1 2 | kind: PipelineRun 3 | metadata: 4 | annotations: 5 | build.appstudio.openshift.io/repo: https://github.com/openshift/configure-alertmanager-operator?rev={{revision}} 6 | build.appstudio.redhat.com/commit_sha: '{{revision}}' 7 | build.appstudio.redhat.com/pull_request_number: '{{pull_request_number}}' 8 | build.appstudio.redhat.com/target_branch: '{{target_branch}}' 9 | pipelinesascode.tekton.dev/max-keep-runs: '25' 10 | pipelinesascode.tekton.dev/on-cel-expression: event == "pull_request" && target_branch 11 | == "master" 12 | creationTimestamp: null 13 | labels: 14 | appstudio.openshift.io/application: configure-alertmanager-operator-master 15 | appstudio.openshift.io/component: configure-alertmanager-operator-master 16 | pipelines.appstudio.openshift.io/type: build 17 | 
name: configure-alertmanager-operator-master-on-pull-request 18 | namespace: camo-hcm-tenant 19 | spec: 20 | params: 21 | - name: git-url 22 | value: '{{source_url}}' 23 | - name: revision 24 | value: '{{revision}}' 25 | - name: output-image 26 | value: quay.io/redhat-user-workloads/camo-hcm-tenant/configure-alertmanager-operator-master/configure-alertmanager-operator-master:on-pr-{{revision}} 27 | - name: image-expires-after 28 | value: 5d 29 | - name: dockerfile 30 | value: build/Dockerfile 31 | - name: path-context 32 | value: . 33 | taskRunTemplate: 34 | serviceAccountName: build-pipeline-configure-alertmanager-operator-master 35 | workspaces: 36 | - name: workspace 37 | volumeClaimTemplate: 38 | metadata: 39 | creationTimestamp: null 40 | spec: 41 | accessModes: 42 | - ReadWriteOnce 43 | resources: 44 | requests: 45 | storage: 1Gi 46 | status: {} 47 | - name: git-auth 48 | secret: 49 | secretName: '{{ git_auth_secret }}' 50 | pipelineRef: 51 | resolver: git 52 | params: 53 | - name: url 54 | value: https://github.com/openshift/boilerplate 55 | - name: revision 56 | value: master 57 | - name: pathInRepo 58 | value: pipelines/docker-build-oci-ta/pipeline.yaml 59 | status: {} 60 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | OPERATOR_NAMESPACE?=$(shell sed -n 's/.*OperatorNamespace .*"\([^"]*\)".*/\1/p' config/config.go) 4 | 5 | IMAGE_REGISTRY?=quay.io 6 | IMAGE_REPOSITORY?=app-sre 7 | IMAGE_NAME?=$(OPERATOR_NAME) 8 | 9 | # Optional additional deployment image 10 | SUPPLEMENTARY_IMAGE_NAME?=$(shell sed -n 's/.*SupplementaryImage .*"\([^"]*\)".*/\1/p' config/config.go) 11 | 12 | # Optional: Enable OLM skip-range 13 | # 
https://v0-18-z.olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/#skiprange 14 | EnableOLMSkipRange?=$(shell sed -n 's/.*EnableOLMSkipRange .*"\([^"]*\)".*/\1/p' config/config.go) 15 | 16 | VERSION_MAJOR?=0 17 | VERSION_MINOR?=1 18 | 19 | ifdef RELEASE_BRANCHED_BUILDS 20 | # Make sure all called shell scripts know what's up 21 | export RELEASE_BRANCHED_BUILDS 22 | 23 | # RELEASE_BRANCH from env vars takes precedence; if not set, try to figure it out 24 | RELEASE_BRANCH:=${RELEASE_BRANCH} 25 | ifneq ($(RELEASE_BRANCH),) 26 | # Sanity check, just to be nice 27 | RELEASE_BRANCH_TEST := $(shell echo ${RELEASE_BRANCH} | grep -E '^release-[0-9]+\.[0-9]+$$') 28 | ifeq ($(RELEASE_BRANCH_TEST),) 29 | $(warning Provided RELEASE_BRANCH doesn't conform to "release-X.Y" pattern; you sure you didn't make a mistake?) 30 | endif 31 | endif 32 | 33 | ifeq ($(RELEASE_BRANCH),) 34 | # Check git repo's branch first 35 | RELEASE_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | grep -E '^release-[0-9]+\.[0-9]+$$') 36 | endif 37 | 38 | ifeq ($(RELEASE_BRANCH),) 39 | # Try to parse it out of Jenkins' JOB_NAME 40 | RELEASE_BRANCH := $(shell echo ${JOB_NAME} | grep -E --only-matching 'release-[0-9]+\.[0-9]+') 41 | endif 42 | 43 | ifeq ($(RELEASE_BRANCH),) 44 | $(error RELEASE_BRANCHED_BUILDS is set, but couldn't detect a release branch and RELEASE_BRANCH is not set; giving up) 45 | else 46 | SEMVER := $(subst release-,,$(subst ., ,$(RELEASE_BRANCH))) 47 | VERSION_MAJOR := $(firstword $(SEMVER)) 48 | VERSION_MINOR := $(lastword $(SEMVER)) 49 | endif 50 | endif 51 | 52 | REGISTRY_USER?=$(QUAY_USER) 53 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 54 | -------------------------------------------------------------------------------- /OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | # ================================ DO NOT EDIT ================================ 2 | # This file is managed in 
https://github.com/openshift/boilerplate 3 | # See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 4 | # ============================================================================= 5 | aliases: 6 | srep-functional-team-aurora: 7 | - abyrne55 8 | - AlexSmithGH 9 | - dakotalongRH 10 | - eth1030 11 | - joshbranham 12 | - luis-falcon 13 | - reedcort 14 | srep-functional-team-fedramp: 15 | - theautoroboto 16 | - katherinelc321 17 | - rojasreinold 18 | - fsferraz-rh 19 | - jonahbrawley 20 | - digilink 21 | - annelson-rh 22 | - pheckenlWork 23 | - ironcladlou 24 | - MrSantamaria 25 | - PeterCSRE 26 | - cjnovak98 27 | srep-functional-team-hulk: 28 | - ravitri 29 | - devppratik 30 | - Tafhim 31 | - tkong-redhat 32 | - TheUndeadKing 33 | - vaidehi411 34 | - chamalabey 35 | - charlesgong 36 | - rbhilare 37 | srep-functional-team-orange: 38 | - bergmannf 39 | - Makdaam 40 | - Nikokolas3270 41 | - RaphaelBut 42 | - MateSaary 43 | - rolandmkunkel 44 | - petrkotas 45 | - zmird-r 46 | - hectorakemp 47 | srep-functional-team-rocket: 48 | - aliceh 49 | - anispate 50 | - clcollins 51 | - Mhodesty 52 | - nephomaniac 53 | - tnierman 54 | srep-functional-team-security: 55 | - jaybeeunix 56 | - sam-nguyen7 57 | - wshearn 58 | - dem4gus 59 | - npecka 60 | - pshickeydev 61 | - casey-williams-rh 62 | - boranx 63 | srep-functional-team-thor: 64 | - a7vicky 65 | - diakovnec 66 | - MitaliBhalla 67 | - feichashao 68 | - samanthajayasinghe 69 | - xiaoyu74 70 | - Tessg22 71 | - smarthall 72 | srep-infra-cicd: 73 | - ritmun 74 | - yiqinzhang 75 | - varunraokadaparthi 76 | srep-functional-leads: 77 | - abyrne55 78 | - clcollins 79 | - bergmannf 80 | - theautoroboto 81 | - smarthall 82 | - sam-nguyen7 83 | - ravitri 84 | srep-team-leads: 85 | - rafael-azevedo 86 | - iamkirkbater 87 | - rogbas 88 | - dustman9000 89 | - bng0y 90 | - bmeng 91 | - typeid 92 | sre-group-leads: 93 | - apahim 94 | - maorfr 95 | - rogbas 96 | srep-architects: 97 | - 
jharrington22 98 | - cblecker 99 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | # ================================ DO NOT EDIT ================================ 2 | # This file is managed in https://github.com/openshift/boilerplate 3 | # See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 4 | # ============================================================================= 5 | aliases: 6 | srep-functional-team-aurora: 7 | - abyrne55 8 | - AlexSmithGH 9 | - dakotalongRH 10 | - eth1030 11 | - joshbranham 12 | - luis-falcon 13 | - reedcort 14 | srep-functional-team-fedramp: 15 | - theautoroboto 16 | - katherinelc321 17 | - rojasreinold 18 | - fsferraz-rh 19 | - jonahbrawley 20 | - digilink 21 | - annelson-rh 22 | - pheckenlWork 23 | - ironcladlou 24 | - MrSantamaria 25 | - PeterCSRE 26 | - cjnovak98 27 | srep-functional-team-hulk: 28 | - ravitri 29 | - devppratik 30 | - Tafhim 31 | - tkong-redhat 32 | - TheUndeadKing 33 | - vaidehi411 34 | - chamalabey 35 | - charlesgong 36 | - rbhilare 37 | srep-functional-team-orange: 38 | - bergmannf 39 | - Makdaam 40 | - Nikokolas3270 41 | - RaphaelBut 42 | - MateSaary 43 | - rolandmkunkel 44 | - petrkotas 45 | - zmird-r 46 | - hectorakemp 47 | srep-functional-team-rocket: 48 | - aliceh 49 | - anispate 50 | - clcollins 51 | - Mhodesty 52 | - nephomaniac 53 | - tnierman 54 | srep-functional-team-security: 55 | - jaybeeunix 56 | - sam-nguyen7 57 | - wshearn 58 | - dem4gus 59 | - npecka 60 | - pshickeydev 61 | - casey-williams-rh 62 | - boranx 63 | srep-functional-team-thor: 64 | - a7vicky 65 | - diakovnec 66 | - MitaliBhalla 67 | - feichashao 68 | - samanthajayasinghe 69 | - xiaoyu74 70 | - Tessg22 71 | - smarthall 72 | srep-infra-cicd: 73 | - ritmun 74 | - yiqinzhang 75 | - varunraokadaparthi 76 | 
srep-functional-leads: 77 | - abyrne55 78 | - clcollins 79 | - bergmannf 80 | - theautoroboto 81 | - smarthall 82 | - sam-nguyen7 83 | - ravitri 84 | srep-team-leads: 85 | - rafael-azevedo 86 | - iamkirkbater 87 | - rogbas 88 | - dustman9000 89 | - bng0y 90 | - bmeng 91 | - typeid 92 | sre-group-leads: 93 | - apahim 94 | - maorfr 95 | - rogbas 96 | srep-architects: 97 | - jharrington22 98 | - cblecker 99 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator-osde2e/test-harness-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_HARNESS_IMAGE 10 | required: true 11 | - name: OCM_TOKEN 12 | required: true 13 | - name: OCM_CCS 14 | required: false 15 | - name: AWS_ACCESS_KEY_ID 16 | required: false 17 | - name: AWS_SECRET_ACCESS_KEY 18 | required: false 19 | - name: CLOUD_PROVIDER_REGION 20 | required: false 21 | - name: GCP_CREDS_JSON 22 | required: false 23 | - name: JOBID 24 | generate: expression 25 | from: "[0-9a-z]{7}" 26 | - name: IMAGE_TAG 27 | value: '' 28 | required: true 29 | - name: LOG_BUCKET 30 | value: 'osde2e-logs' 31 | objects: 32 | - apiVersion: batch/v1 33 | kind: Job 34 | metadata: 35 | name: osde2e-${OPERATOR_NAME}-${IMAGE_TAG}-${JOBID} 36 | spec: 37 | backoffLimit: 0 38 | template: 39 | spec: 40 | restartPolicy: Never 41 | containers: 42 | - name: osde2e 43 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 44 | command: 45 | - /osde2e 46 | args: 47 | - test 48 | - --only-health-check-nodes 49 | - --configs 50 | - ${OSDE2E_CONFIGS} 51 | securityContext: 52 | runAsNonRoot: true 53 | allowPrivilegeEscalation: false 54 | capabilities: 55 | drop: ["ALL"] 56 | seccompProfile: 57 
| type: RuntimeDefault 58 | env: 59 | - name: TEST_HARNESSES 60 | value: ${TEST_HARNESS_IMAGE}:${IMAGE_TAG} 61 | - name: OCM_TOKEN 62 | value: ${OCM_TOKEN} 63 | - name: OCM_CCS 64 | value: ${OCM_CCS} 65 | - name: AWS_ACCESS_KEY_ID 66 | value: ${AWS_ACCESS_KEY_ID} 67 | - name: AWS_SECRET_ACCESS_KEY 68 | value: ${AWS_SECRET_ACCESS_KEY} 69 | - name: CLOUD_PROVIDER_REGION 70 | value: ${CLOUD_PROVIDER_REGION} 71 | - name: GCP_CREDS_JSON 72 | value: ${GCP_CREDS_JSON} 73 | - name: LOG_BUCKET 74 | value: ${LOG_BUCKET} 75 | -------------------------------------------------------------------------------- /boilerplate/_lib/container-make: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ "$1" == "-h"* ]] || [[ "$1" == "--h"* ]]; then 4 | echo "Usage: $0 {arguments to the real 'make'}" 5 | echo "Runs 'make' in the boilerplate backing container." 6 | echo "If the command fails, starts a shell in the container so you can debug." 7 | echo "Set NONINTERACTIVE=true (or TRUE) to skip the debug shell and exit with the make return code." 8 | exit -1 9 | fi 10 | 11 | source ${0%/*}/common.sh 12 | 13 | CONTAINER_ENGINE="${CONTAINER_ENGINE:-$(command -v podman || command -v docker)}" 14 | [[ -n "$CONTAINER_ENGINE" ]] || err "Couldn't find a container engine. Are you already in a container?" 15 | 16 | # Make sure the mount inside the container is named in such a way that 17 | # - openapi-gen (which relies on GOPATH) produces absolute paths; and 18 | # - other go-ish paths are writeable, e.g. for `go mod download`. 19 | CONTAINER_MOUNT=/go/src/$(repo_import $REPO_ROOT) 20 | 21 | # First set up a detached container with the repo mounted. 
22 | banner "Starting the container" 23 | CE_OPTS="--platform=linux/amd64" 24 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]]; then 25 | CE_OPTS="${CE_OPTS} --userns keep-id" 26 | fi 27 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]] && [[ $OSTYPE == *"linux"* ]]; then 28 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT:Z" 29 | else 30 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT" 31 | fi 32 | container_id=$($CONTAINER_ENGINE run -d ${CE_OPTS} $IMAGE_PULL_PATH sleep infinity) 33 | 34 | if [[ $? -ne 0 ]] || [[ -z "$container_id" ]]; then 35 | err "Couldn't start detached container" 36 | fi 37 | 38 | # Now run our `make` command in it with the right UID and working directory 39 | args="exec -it -u $(id -u):0 -w $CONTAINER_MOUNT $container_id" 40 | banner "Running: make $@" 41 | $CONTAINER_ENGINE $args make "$@" 42 | rc=$? 43 | 44 | # If it failed, check if we should drop into a shell or exit 45 | if [[ $rc -ne 0 ]]; then 46 | # Case-insensitive check for NONINTERACTIVE (true, TRUE, True all work) 47 | if [[ "${NONINTERACTIVE,,}" == "true" ]]; then 48 | banner "The 'make' command failed with exit code $rc. Skipping debug shell (NONINTERACTIVE=${NONINTERACTIVE})." 49 | else 50 | banner "The 'make' command failed! Starting a shell in the container for debugging. Just 'exit' when done." 
51 | $CONTAINER_ENGINE $args /bin/bash 52 | fi 53 | fi 54 | 55 | # Finally, remove the container 56 | banner "Cleaning up the container" 57 | $CONTAINER_ENGINE rm -f $container_id >/dev/null 58 | 59 | # Exit with the return code from make 60 | exit $rc 61 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/codecov.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | REPO_ROOT=$(git rev-parse --show-toplevel) 8 | CI_SERVER_URL=https://prow.svc.ci.openshift.org/view/gcs/origin-ci-test 9 | COVER_PROFILE=${COVER_PROFILE:-coverage.out} 10 | JOB_TYPE=${JOB_TYPE:-"local"} 11 | 12 | # Default concurrency to four threads. By default it's the number of procs, 13 | # which seems to be 16 in the CI env. Some consumers' coverage jobs were 14 | # regularly getting OOM-killed; so do this rather than boost the pod resources 15 | # unreasonably. 16 | COV_THREAD_COUNT=${COV_THREAD_COUNT:-4} 17 | make -C "${REPO_ROOT}" go-test TESTOPTS="-coverprofile=${COVER_PROFILE}.tmp -covermode=atomic -coverpkg=./... 
-p ${COV_THREAD_COUNT}" 18 | 19 | # Remove generated files from coverage profile 20 | grep -v "zz_generated" "${COVER_PROFILE}.tmp" > "${COVER_PROFILE}" 21 | rm -f "${COVER_PROFILE}.tmp" 22 | 23 | # Configure the git refs and job link based on how the job was triggered via prow 24 | if [[ "${JOB_TYPE}" == "presubmit" ]]; then 25 | echo "detected PR code coverage job for #${PULL_NUMBER}" 26 | REF_FLAGS="-P ${PULL_NUMBER} -C ${PULL_PULL_SHA}" 27 | JOB_LINK="${CI_SERVER_URL}/pr-logs/pull/${REPO_OWNER}_${REPO_NAME}/${PULL_NUMBER}/${JOB_NAME}/${BUILD_ID}" 28 | elif [[ "${JOB_TYPE}" == "postsubmit" ]]; then 29 | echo "detected branch code coverage job for ${PULL_BASE_REF}" 30 | REF_FLAGS="-B ${PULL_BASE_REF} -C ${PULL_BASE_SHA}" 31 | JOB_LINK="${CI_SERVER_URL}/logs/${JOB_NAME}/${BUILD_ID}" 32 | elif [[ "${JOB_TYPE}" == "local" ]]; then 33 | echo "coverage report available at ${COVER_PROFILE}" 34 | exit 0 35 | else 36 | echo "${JOB_TYPE} jobs not supported" >&2 37 | exit 1 38 | fi 39 | 40 | # Configure certain internal codecov variables with values from prow. 41 | export CI_BUILD_URL="${JOB_LINK}" 42 | export CI_BUILD_ID="${JOB_NAME}" 43 | export CI_JOB_ID="${BUILD_ID}" 44 | 45 | if [[ "${JOB_TYPE}" != "local" ]]; then 46 | if [[ -z "${ARTIFACT_DIR:-}" ]] || [[ ! -d "${ARTIFACT_DIR}" ]] || [[ ! 
-w "${ARTIFACT_DIR}" ]]; then 47 | echo '${ARTIFACT_DIR} must be set for non-local jobs, and must point to a writable directory' >&2 48 | exit 1 49 | fi 50 | curl -sS https://codecov.io/bash -o "${ARTIFACT_DIR}/codecov.sh" 51 | bash <(cat "${ARTIFACT_DIR}/codecov.sh") -Z -K -f "${COVER_PROFILE}" -r "${REPO_OWNER}/${REPO_NAME}" ${REF_FLAGS} 52 | else 53 | bash <(curl -s https://codecov.io/bash) -Z -K -f "${COVER_PROFILE}" -r "${REPO_OWNER}/${REPO_NAME}" ${REF_FLAGS} 54 | fi 55 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/catalog-build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source `dirname $0`/common.sh 6 | 7 | usage() { echo "Usage: $0 -o operator-name -c saas-repository-channel -r registry-image" 1>&2; exit 1; } 8 | 9 | while getopts "o:c:r:" option; do 10 | case "${option}" in 11 | o) 12 | operator_name=${OPTARG} 13 | ;; 14 | c) 15 | operator_channel=${OPTARG} 16 | ;; 17 | r) 18 | # NOTE: This is the URL without the tag/digest 19 | registry_image=${OPTARG} 20 | ;; 21 | *) 22 | usage 23 | esac 24 | done 25 | 26 | # Detect the container engine to use, allowing override from the env 27 | CONTAINER_ENGINE=${CONTAINER_ENGINE:-$(command -v podman || command -v docker || true)} 28 | if [[ -z "$CONTAINER_ENGINE" ]]; then 29 | echo "WARNING: Couldn't find a container engine! Defaulting to docker." 30 | CONTAINER_ENGINE=docker 31 | fi 32 | 33 | # Checking parameters 34 | check_mandatory_params operator_channel operator_name 35 | 36 | # Parameters for the Dockerfile 37 | SAAS_OPERATOR_DIR="saas-${operator_name}-bundle" 38 | BUNDLE_DIR="${SAAS_OPERATOR_DIR}/${operator_name}" 39 | DOCKERFILE_REGISTRY="build/Dockerfile.olm-registry" 40 | 41 | # Checking SAAS_OPERATOR_DIR exist 42 | if [ ! 
-d "${SAAS_OPERATOR_DIR}/.git" ] ; then 43 | echo "${SAAS_OPERATOR_DIR} should exist and be a git repository" 44 | exit 1 45 | fi 46 | 47 | # Calculate new operator version from bundles inside the saas directory 48 | OPERATOR_NEW_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 1) 49 | 50 | # Create package yaml 51 | # This must be included in the registry build 52 | # `currentCSV` must reference the latest bundle version included. 53 | # Any version their after `currentCSV` loaded by the initalizer 54 | # will be silently pruned as it's not reachable 55 | PACKAGE_YAML_PATH="${BUNDLE_DIR}/${operator_name}.package.yaml" 56 | 57 | cat < "${PACKAGE_YAML_PATH}" 58 | packageName: ${operator_name} 59 | channels: 60 | - name: ${operator_channel} 61 | currentCSV: ${operator_name}.v${OPERATOR_NEW_VERSION} 62 | EOF 63 | 64 | TAG="${operator_channel}-latest" 65 | if [[ "${RELEASE_BRANCHED_BUILDS}" ]]; then 66 | TAG="v${OPERATOR_NEW_VERSION}" 67 | fi 68 | 69 | ${CONTAINER_ENGINE} build --pull -f "${DOCKERFILE_REGISTRY}" --build-arg "SAAS_OPERATOR_DIR=${SAAS_OPERATOR_DIR}" --tag "${registry_image}:${TAG}" . 70 | 71 | if [ $? -ne 0 ] ; then 72 | echo "docker build failed, exiting..." 
73 | exit 1 74 | fi 75 | 76 | # TODO : Test the image and the version it contains 77 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/app-sre-build-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ev 4 | 5 | usage() { 6 | cat < "$grpcurl" 70 | chmod +x "$grpcurl" 71 | fi 72 | 73 | ln -fs "$grpcurl" grpcurl 74 | ;; 75 | 76 | venv) 77 | # Set up a python virtual environment 78 | python3 -m venv .venv 79 | # Install required libs, if a requirements file was given 80 | if [[ -n "$2" ]]; then 81 | .venv/bin/python3 -m pip install -r "$2" 82 | fi 83 | ;; 84 | 85 | *) 86 | echo "Unknown dependency: ${DEPENDENCY}" 87 | exit 1 88 | ;; 89 | esac 90 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/README.md: -------------------------------------------------------------------------------- 1 | # Conventions for Ginkgo based e2e tests 2 | 3 | - [Conventions for Ginkgo based e2e tests](#conventions-for-ginkgo-based-e2e-tests) 4 | - [Consuming](#consuming) 5 | - [`make` targets and functions.](#make-targets-and-functions) 6 | - [E2E Test](#e2e-test) 7 | - [Local Testing](#e2e-local-testing) 8 | 9 | ## Consuming 10 | Currently, this convention is only intended for OSD operators. To adopt this convention, your `boilerplate/update.cfg` should include: 11 | 12 | ``` 13 | openshift/golang-osd-e2e 14 | ``` 15 | 16 | ## `make` targets and functions. 
17 | 18 | **Note:** Your repository's main `Makefile` needs to be edited to include: 19 | 20 | ``` 21 | include boilerplate/generated-includes.mk 22 | ``` 23 | 24 | One of the primary purposes of these `make` targets is to allow you to 25 | standardize your prow and app-sre pipeline configurations using the 26 | following: 27 | 28 | ### E2e Test 29 | 30 | | `make` target | Purpose | 31 | |------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 32 | | `e2e-binary-build` | Compiles ginkgo tests under test/e2e and creates the ginkgo binary. | 33 | | `e2e-image-build-push` | Builds e2e image and pushes to operator's quay repo. Image name is defaulted to -test-harness. Quay repository must be created beforehand. | 34 | 35 | #### E2E Local Testing 36 | 37 | Please follow [this README](https://github.com/openshift/ops-sop/blob/master/v4/howto/osde2e/operator-test-harnesses.md#using-ginkgo) to run your e2e tests locally 38 | 39 | -------------------------------------------------------------------------------- /test/e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 
2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-configure-alertmanager-operator-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - --configs 58 | - ${OSDE2E_CONFIGS} 59 | resources: 60 | requests: 61 | cpu: "300m" 62 | memory: "600Mi" 63 | limits: 64 | cpu: "1" 65 | memory: "1200Mi" 66 | securityContext: 67 | runAsNonRoot: true 68 | allowPrivilegeEscalation: false 69 | capabilities: 70 | drop: ["ALL"] 71 | seccompProfile: 72 | type: RuntimeDefault 73 | env: 74 | - name: AD_HOC_TEST_IMAGES 75 | value: ${TEST_IMAGE}:${IMAGE_TAG} 76 | - name: OCM_CLIENT_ID 77 | value: ${OCM_CLIENT_ID} 78 | - name: OCM_CLIENT_SECRET 79 | value: ${OCM_CLIENT_SECRET} 80 | - name: OCM_CCS 81 | value: ${OCM_CCS} 82 | - name: AWS_ACCESS_KEY_ID 83 | value: ${AWS_ACCESS_KEY_ID} 84 | - name: 
AWS_SECRET_ACCESS_KEY 85 | value: ${AWS_SECRET_ACCESS_KEY} 86 | - name: CLOUD_PROVIDER_REGION 87 | value: ${CLOUD_PROVIDER_REGION} 88 | - name: GCP_CREDS_JSON 89 | value: ${GCP_CREDS_JSON} 90 | - name: LOG_BUCKET 91 | value: ${LOG_BUCKET} 92 | - name: USE_EXISTING_CLUSTER 93 | value: ${USE_EXISTING_CLUSTER} 94 | - name: CAD_PAGERDUTY_ROUTING_KEY 95 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 96 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-${OPERATOR_NAME}-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - 
--configs 58 | - ${OSDE2E_CONFIGS} 59 | resources: 60 | requests: 61 | cpu: "300m" 62 | memory: "600Mi" 63 | limits: 64 | cpu: "1" 65 | memory: "1200Mi" 66 | securityContext: 67 | runAsNonRoot: true 68 | allowPrivilegeEscalation: false 69 | capabilities: 70 | drop: ["ALL"] 71 | seccompProfile: 72 | type: RuntimeDefault 73 | env: 74 | - name: AD_HOC_TEST_IMAGES 75 | value: ${TEST_IMAGE}:${IMAGE_TAG} 76 | - name: OCM_CLIENT_ID 77 | value: ${OCM_CLIENT_ID} 78 | - name: OCM_CLIENT_SECRET 79 | value: ${OCM_CLIENT_SECRET} 80 | - name: OCM_CCS 81 | value: ${OCM_CCS} 82 | - name: AWS_ACCESS_KEY_ID 83 | value: ${AWS_ACCESS_KEY_ID} 84 | - name: AWS_SECRET_ACCESS_KEY 85 | value: ${AWS_SECRET_ACCESS_KEY} 86 | - name: CLOUD_PROVIDER_REGION 87 | value: ${CLOUD_PROVIDER_REGION} 88 | - name: GCP_CREDS_JSON 89 | value: ${GCP_CREDS_JSON} 90 | - name: LOG_BUCKET 91 | value: ${LOG_BUCKET} 92 | - name: USE_EXISTING_CLUSTER 93 | value: ${USE_EXISTING_CLUSTER} 94 | - name: CAD_PAGERDUTY_ROUTING_KEY 95 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 96 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator-osde2e/update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $CONVENTION_ROOT/_lib/common.sh 6 | 7 | # No PRE 8 | [[ "$1" == "PRE" ]] && exit 0 9 | 10 | # Expect POST 11 | [[ "$1" == "POST" ]] || err "Got a parameter I don't understand: '$1'. Did the infrastructure change?" 
12 | 13 | REPO_ROOT=$(git rev-parse --show-toplevel) 14 | OPERATOR_NAME=$(sed -n 's/.*OperatorName .*=.*"\([^"]*\)".*/\1/p' "${REPO_ROOT}/config/config.go") 15 | E2E_SUITE_DIRECTORY=$REPO_ROOT/test/e2e 16 | 17 | # Update operator name in templates 18 | OPERATOR_UNDERSCORE_NAME=${OPERATOR_NAME//-/_} 19 | OPERATOR_PROPER_NAME=$(echo "$OPERATOR_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++){ $i=toupper(substr($i,1,1)) substr($i,2) }}1') 20 | OPERATOR_NAME_CAMEL_CASE=${OPERATOR_PROPER_NAME// /} 21 | 22 | mkdir -p "${E2E_SUITE_DIRECTORY}" 23 | 24 | E2E_SUITE_BUILDER_IMAGE=registry.ci.openshift.org/openshift/release:rhel-8-release-golang-1.23-openshift-4.19 25 | if [[ -n ${KONFLUX_BUILDS} ]]; then 26 | E2E_SUITE_BUILDER_IMAGE="brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_8_1.23" 27 | fi 28 | 29 | echo "syncing ${E2E_SUITE_DIRECTORY}/Dockerfile" 30 | tee "${E2E_SUITE_DIRECTORY}/Dockerfile" <"${E2E_SUITE_DIRECTORY}/test-harness-template.yml" 91 | -------------------------------------------------------------------------------- /boilerplate/_lib/freeze-check: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # NOTE: For security reasons, everything imported or invoked (even 4 | # indirectly) by this script should be audited for vulnerabilities and 5 | # explicitly excluded from `linguist-generated` in the consuming 6 | # repository's .gitattributes. In other words, we want PRs to show 7 | # deltas to this script and all its dependencies by default so that 8 | # attempts to inject or circumvent code are visible. 9 | 10 | set -e 11 | 12 | REPO_ROOT=$(git rev-parse --show-toplevel) 13 | # Hardcoded rather than sourced to reduce attack surface. 14 | BOILERPLATE_GIT_REPO=https://github.com/openshift/boilerplate.git 15 | 16 | # Validate that no subscribed boilerplate artifacts have been changed. 17 | # PR checks may wish to gate on this. 
18 | 19 | # This works by grabbing the commit hash of the boilerplate repository 20 | # at which the last update was applied, running the main `update` driver 21 | # against that, and failing if there's a resulting diff. 22 | 23 | # If we can't tell what that commit was, we must assume this is the 24 | # first update, and we'll (noisily) "succeed". 25 | 26 | # Note that this ought to work when you've just committed an update, 27 | # even if you've changed your update.cfg beforehand. We're basically 28 | # making sure you didn't muck with anything after updating. 29 | 30 | # For this to work, you have to be starting from a clean repository 31 | # state (any changes committed). 32 | # TODO(efried): This is not ideal -- it would be nice if I could check 33 | # this before committing my changes -- but how would that work? Diff to 34 | # a file, create a temporary commit, run the rest, remove the commit, 35 | # and reapply the diff? Messy and error-prone -- and I would be 36 | # seriously ticked off if something went wrong and lost my in-flight 37 | # changes. 38 | if ! [ -z "$(git status --porcelain -- ':!build/Dockerfile*')" ]; then 39 | echo "Can't validate boilerplate in a dirty repository. Please commit your changes and try again." >&2 40 | exit 1 41 | fi 42 | 43 | # We glean the last boilerplate commit from the 44 | # last-boilerplate-commit file, which gets laid down by the main 45 | # `update` driver each time it runs. 46 | LBCF=${REPO_ROOT}/boilerplate/_data/last-boilerplate-commit 47 | if ! [[ -f "$LBCF" ]]; then 48 | echo "Couldn't discover last boilerplate commit! Assuming you're bootstrapping." 49 | exit 0 50 | fi 51 | LBC=$(cat $LBCF) 52 | 53 | # Download just that commit 54 | echo "Fetching $LBC from $BOILERPLATE_GIT_REPO" 55 | # boilerplate/update cleans up this temp dir 56 | TMPD=$(mktemp -d) 57 | cd $TMPD 58 | git init 59 | # TODO(efried): DRY this remote. Make it configurable? 
60 | git remote add origin $BOILERPLATE_GIT_REPO 61 | git fetch origin $(cat $LBCF) --tags 62 | git reset --hard FETCH_HEAD 63 | 64 | # Now invoke the update script, overriding the source repository we've 65 | # just downloaded at the appropriate commit. 66 | # We invoke the script explicitly rather than via the make target to 67 | # close a security hole whereby the latter is overridden. 68 | echo "Running update" 69 | cd $REPO_ROOT 70 | BOILERPLATE_GIT_REPO="${TMPD}" boilerplate/update 71 | 72 | # Okay, if anything has changed, that's bad. 73 | if [[ $(git status --porcelain -- ':!build/Dockerfile*' | wc -l) -ne 0 ]]; then 74 | echo "Your boilerplate is dirty!" >&2 75 | git status --porcelain -- ':!build/Dockerfile*' 76 | exit 1 77 | fi 78 | 79 | echo "Your boilerplate is clean!" 80 | exit 0 81 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/standard.mk: -------------------------------------------------------------------------------- 1 | # Validate variables in project.mk exist 2 | ifndef OPERATOR_NAME 3 | $(error OPERATOR_NAME is not set; only operators should consume this convention; check project.mk file) 4 | endif 5 | ifndef E2E_IMAGE_REGISTRY 6 | $(error E2E_IMAGE_REGISTRY is not set; check project.mk file) 7 | endif 8 | ifndef E2E_IMAGE_REPOSITORY 9 | $(error E2E_IMAGE_REPOSITORY is not set; check project.mk file) 10 | endif 11 | 12 | # Use current commit as e2e image tag 13 | CURRENT_COMMIT=$(shell git rev-parse --short=7 HEAD) 14 | E2E_IMAGE_TAG=$(CURRENT_COMMIT) 15 | 16 | ### Accommodate docker or podman 17 | # 18 | # The docker/podman creds cache needs to be in a location unique to this 19 | # invocation; otherwise it could collide across jenkins jobs. We'll use 20 | # a .docker folder relative to pwd (the repo root). 
21 | CONTAINER_ENGINE_CONFIG_DIR = .docker 22 | JENKINS_DOCKER_CONFIG_FILE = /var/lib/jenkins/.docker/config.json 23 | export REGISTRY_AUTH_FILE = ${CONTAINER_ENGINE_CONFIG_DIR}/config.json 24 | 25 | # If this configuration file doesn't exist, podman will error out. So 26 | # we'll create it if it doesn't exist. 27 | ifeq (,$(wildcard $(REGISTRY_AUTH_FILE))) 28 | $(shell mkdir -p $(CONTAINER_ENGINE_CONFIG_DIR)) 29 | # Copy the node container auth file so that we get access to the registries the 30 | # parent node has access to 31 | $(shell if test -f $(JENKINS_DOCKER_CONFIG_FILE); then cp $(JENKINS_DOCKER_CONFIG_FILE) $(REGISTRY_AUTH_FILE); fi) 32 | endif 33 | 34 | # ==> Docker uses --config=PATH *before* (any) subcommand; so we'll glue 35 | # that to the CONTAINER_ENGINE variable itself. (NOTE: I tried half a 36 | # dozen other ways to do this. This was the least ugly one that actually 37 | # works.) 38 | ifndef CONTAINER_ENGINE 39 | CONTAINER_ENGINE=$(shell command -v podman 2>/dev/null || echo docker --config=$(CONTAINER_ENGINE_CONFIG_DIR)) 40 | endif 41 | 42 | REGISTRY_USER ?= 43 | REGISTRY_TOKEN ?= 44 | 45 | # TODO: Figure out how to discover this dynamically 46 | OSDE2E_CONVENTION_DIR := boilerplate/openshift/golang-osd-operator-osde2e 47 | 48 | # log into quay.io 49 | .PHONY: container-engine-login 50 | container-engine-login: 51 | @test "${REGISTRY_USER}" != "" && test "${REGISTRY_TOKEN}" != "" || (echo "REGISTRY_USER and REGISTRY_TOKEN must be defined" && exit 1) 52 | mkdir -p ${CONTAINER_ENGINE_CONFIG_DIR} 53 | @${CONTAINER_ENGINE} login -u="${REGISTRY_USER}" -p="${REGISTRY_TOKEN}" quay.io 54 | 55 | ###################### 56 | # Targets used by e2e test suite 57 | ###################### 58 | 59 | # create binary 60 | .PHONY: e2e-binary-build 61 | e2e-binary-build: GOFLAGS_MOD=-mod=mod 62 | e2e-binary-build: GOENV=GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 GOFLAGS="${GOFLAGS_MOD}" 63 | e2e-binary-build: 64 | go mod tidy 65 | go test ./test/e2e -v -c 
--tags=osde2e -o e2e.test 66 | 67 | # push e2e image tagged as latest and as repo commit hash 68 | .PHONY: e2e-image-build-push 69 | e2e-image-build-push: container-engine-login 70 | ${CONTAINER_ENGINE} build --pull -f test/e2e/Dockerfile -t $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) . 71 | ${CONTAINER_ENGINE} tag $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 72 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) 73 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 74 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-propose-update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | usage() { 7 | cat < Docker uses --config=PATH *before* (any) subcommand; so we'll glue 34 | # that to the CONTAINER_ENGINE variable itself. (NOTE: I tried half a 35 | # dozen other ways to do this. This was the least ugly one that actually 36 | # works.) 
37 | ifndef CONTAINER_ENGINE 38 | CONTAINER_ENGINE=$(shell command -v podman 2>/dev/null || echo docker --config=$(CONTAINER_ENGINE_CONFIG_DIR)) 39 | endif 40 | 41 | REGISTRY_USER ?= 42 | REGISTRY_TOKEN ?= 43 | 44 | # TODO: Figure out how to discover this dynamically 45 | OSDE2E_CONVENTION_DIR := boilerplate/openshift/golang-osd-operator-osde2e 46 | 47 | # TODO: figure out how to container-engine-login only once across multiple `make` calls 48 | .PHONY: container-build-push-one 49 | container-build-push-one: container-engine-login 50 | @(if [[ -z "${IMAGE_URI}" ]]; then echo "Must specify IMAGE_URI"; exit 1; fi) 51 | @(if [[ -z "${DOCKERFILE_PATH}" ]]; then echo "Must specify DOCKERFILE_PATH"; exit 1; fi) 52 | ${CONTAINER_ENGINE} build --pull -f $(DOCKERFILE_PATH) -t $(IMAGE_URI) . 53 | ${CONTAINER_ENGINE} push ${IMAGE_URI} 54 | 55 | # log into quay.io 56 | .PHONY: container-engine-login 57 | container-engine-login: 58 | @test "${REGISTRY_USER}" != "" && test "${REGISTRY_TOKEN}" != "" || (echo "REGISTRY_USER and REGISTRY_TOKEN must be defined" && exit 1) 59 | mkdir -p ${CONTAINER_ENGINE_CONFIG_DIR} 60 | @${CONTAINER_ENGINE} login -u="${REGISTRY_USER}" -p="${REGISTRY_TOKEN}" quay.io 61 | 62 | ###################### 63 | # Targets used by e2e test harness 64 | ###################### 65 | 66 | # create binary 67 | .PHONY: e2e-harness-build 68 | e2e-harness-build: GOFLAGS_MOD=-mod=mod 69 | e2e-harness-build: GOENV=GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 GOFLAGS="${GOFLAGS_MOD}" 70 | e2e-harness-build: 71 | go mod tidy 72 | go test ./test/e2e -v -c --tags=osde2e -o harness.test 73 | 74 | # TODO: Push to a known image tag and commit id 75 | # push harness image 76 | .PHONY: e2e-image-build-push 77 | e2e-image-build-push: 78 | ${OSDE2E_CONVENTION_DIR}/e2e-image-build-push.sh "./test/e2e/Dockerfile $(IMAGE_REGISTRY)/$(HARNESS_IMAGE_REPOSITORY)/$(HARNESS_IMAGE_NAME):$(HARNESS_IMAGE_TAG)" 79 | ${OSDE2E_CONVENTION_DIR}/e2e-image-build-push.sh "./test/e2e/Dockerfile 
$(IMAGE_REGISTRY)/$(HARNESS_IMAGE_REPOSITORY)/$(HARNESS_IMAGE_NAME):latest" 80 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator-osde2e/README.md: -------------------------------------------------------------------------------- 1 | # Conventions for Ginkgo based e2e tests 2 | 3 | - [Conventions for Ginkgo based e2e tests](#conventions-for-ginkgo-based-e2e-tests) 4 | - [Consuming](#consuming) 5 | - [`make` targets and functions.](#make-targets-and-functions) 6 | - [E2E Test Harness](#e2e-test-harness) 7 | - [Local Testing](#e2e-harness-local-testing) 8 | 9 | ## Consuming 10 | Currently, this convention is only intended for OSD operators. To adopt this convention, your `boilerplate/update.cfg` should include: 11 | 12 | ``` 13 | openshift/golang-osd-operator-osde2e 14 | ``` 15 | 16 | ## `make` targets and functions. 17 | 18 | **Note:** Your repository's main `Makefile` needs to be edited to include the 19 | "nexus makefile include": 20 | 21 | ``` 22 | include boilerplate/generated-includes.mk 23 | ``` 24 | 25 | One of the primary purposes of these `make` targets is to allow you to 26 | standardize your prow and app-sre pipeline configurations using the 27 | following: 28 | 29 | ### E2e Test Harness 30 | 31 | | `make` target | Purpose | 32 | |--------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 33 | | `e2e-harness-generate` | Generate scaffolding for an end to end test harness. 
The `test/e2e` directory is created where the tests and test runner reside. The harness has access to cloud client and harness passthrough secrets within the test job cluster. Add your operator related ginkgo e2e tests under the `test/e2e/_tests.go` file. See [this README](https://github.com/openshift/osde2e-example-test-harness/blob/main/README.md#locally-running-this-example) for more details on test harness. | 34 | | `e2e-harness-build`| Compiles ginkgo tests under test/e2e and creates the ginkgo binary. | 35 | | `e2e-image-build-push` | Builds e2e test harness image and pushes to operator's quay repo. Image name is defaulted to -test-harness. Quay repository must be created beforehand. | 36 | 37 | #### E2E Harness Local Testing 38 | 39 | Please follow [this README](https://github.com/openshift/osde2e-example-test-harness/blob/main/README.md#locally-running-this-example) to run your e2e tests locally 40 | 41 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/prow-config: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | REPO_ROOT=$(git rev-parse --show-toplevel) 6 | source $REPO_ROOT/boilerplate/_lib/common.sh 7 | source $REPO_ROOT/boilerplate/_lib/release.sh 8 | 9 | cmd=${0##*/} 10 | 11 | usage() { 12 | cat < $config_dir/$config 54 | build_root: 55 | from_repository: true 56 | images: 57 | - dockerfile_path: build/Dockerfile 58 | to: unused 59 | resources: 60 | '*': 61 | limits: 62 | memory: 4Gi 63 | requests: 64 | cpu: 100m 65 | memory: 200Mi 66 | tests: 67 | - as: e2e-binary-build-success 68 | commands: | 69 | make e2e-binary-build 70 | container: 71 | from: src 72 | run_if_changed: ^(test/e2e/\.*|go\.mod|go\.sum)$ 73 | - as: coverage 74 | commands: | 75 | export CODECOV_TOKEN=\$(cat /tmp/secret/CODECOV_TOKEN) 76 | make coverage 77 | container: 78 | from: src 79 | skip_if_only_changed: 
^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 80 | secret: 81 | mount_path: /tmp/secret 82 | name: ${CONSUMER_NAME}-codecov-token 83 | - as: publish-coverage 84 | commands: | 85 | export CODECOV_TOKEN=\$(cat /tmp/secret/CODECOV_TOKEN) 86 | make coverage 87 | container: 88 | from: src 89 | postsubmit: true 90 | secret: 91 | mount_path: /tmp/secret 92 | name: ${CONSUMER_NAME}-codecov-token 93 | - as: lint 94 | commands: make lint 95 | container: 96 | from: src 97 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 98 | - as: test 99 | commands: make test 100 | container: 101 | from: src 102 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 103 | - as: validate 104 | commands: make validate 105 | container: 106 | from: src 107 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 108 | zz_generated_metadata: 109 | branch: ${DEFAULT_BRANCH} 110 | org: ${CONSUMER_ORG} 111 | repo: ${CONSUMER_NAME} 112 | EOF 113 | 114 | make jobs 115 | 116 | release_done_msg $release_branch 117 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/csv-generate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: staging-csv-build 2 | staging-csv-build: 3 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 4 | 5 | .PHONY: staging-catalog-build 6 | staging-catalog-build: 7 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c staging -r ${REGISTRY_IMAGE} 8 | 9 | .PHONY: staging-saas-bundle-push 10 | staging-saas-bundle-push: 11 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r 
${REGISTRY_IMAGE} 12 | 13 | .PHONY: staging-catalog-publish 14 | staging-catalog-publish: 15 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 16 | 17 | .PHONY: staging-catalog-build-and-publish 18 | staging-catalog-build-and-publish: 19 | @$(MAKE) -s staging-csv-build --no-print-directory 20 | @$(MAKE) -s staging-catalog-build --no-print-directory 21 | @$(MAKE) -s staging-catalog-publish --no-print-directory 22 | 23 | .PHONY: production-hack-csv-build 24 | production-hack-csv-build: 25 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) -g hack 26 | 27 | .PHONY: production-csv-build 28 | production-csv-build: 29 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 30 | 31 | .PHONY: production-catalog-build 32 | production-catalog-build: 33 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c production -r ${REGISTRY_IMAGE} 34 | 35 | .PHONY: production-saas-bundle-push 36 | production-saas-bundle-push: 37 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 38 | 39 | .PHONY: production-catalog-publish 40 | production-catalog-publish: 41 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 42 | 43 | .PHONY: production-catalog-build-and-publish 44 | production-catalog-build-and-publish: 45 | @$(MAKE) -s production-csv-build --no-print-directory 46 | @$(MAKE) -s production-catalog-build --no-print-directory 47 | @$(MAKE) -s 
production-catalog-publish --no-print-directory 48 | 49 | .PHONY: stable-csv-build 50 | stable-csv-build: 51 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 52 | 53 | .PHONY: stable-catalog-build 54 | stable-catalog-build: 55 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c stable -r ${REGISTRY_IMAGE} 56 | 57 | .PHONY: stable-saas-bundle-push 58 | stable-saas-bundle-push: 59 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 60 | 61 | .PHONY: stable-catalog-publish 62 | stable-catalog-publish: 63 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 64 | 65 | .PHONY: stable-catalog-build-and-publish 66 | stable-catalog-build-and-publish: 67 | @$(MAKE) -s stable-csv-build --no-print-directory 68 | @$(MAKE) -s stable-catalog-build --no-print-directory 69 | @$(MAKE) -s stable-catalog-publish --no-print-directory 70 | -------------------------------------------------------------------------------- /functions.mk: -------------------------------------------------------------------------------- 1 | # Arguments 2 | # 1 - Channel (the branch name in the 'operator bundle' repo) 3 | # 2 - Bundle github name (eg foo/bar) 4 | # 3 - Automator git push token (for "app" username) 5 | # 4 - Whether or not to remove any versions more recent than deployed hash (true or false) 6 | # 5 - saasherder config github repo name (eg bip/bop) 7 | # 6 - saasherder config path (absolute within repo, eg /name/hive.yaml) 8 | # 7 - relative path to bundle generator python script (eg ./build/generate-operator-bundle.py) 9 | # 8 - Catalog registry quay.io organization name (eg openshift-sre) 10 | # Uses these variables (from 
project.mk or standard.mk): 11 | # Operator image 12 | # Git hash 13 | # Commit count 14 | # Operator version 15 | define create_push_catalog_image 16 | set -e ;\ 17 | git clone --branch $(1) "https://app:$(3)@gitlab.cee.redhat.com/$(2).git" bundles-$(1) ;\ 18 | mkdir -p bundles-$(1)/$(OPERATOR_NAME) ;\ 19 | removed_versions="" ;\ 20 | if [[ "$$(echo $(4) | tr [:upper:] [:lower:])" == "true" ]]; then \ 21 | deployed_hash=$$(curl -s 'https://gitlab.cee.redhat.com/$(5)/raw/master/$(6)' | $(CONTAINER_ENGINE) run --rm -i quay.io/app-sre/yq:3.4.1 yq r - 'resourceTemplates[*].targets(namespace.$$ref==/services/osd-operators/namespaces/hivep01ue1/cluster-scope.yml).ref') ;\ 22 | echo "Current deployed production HASH: $$deployed_hash" ;\ 23 | if [[ ! "$${deployed_hash}" =~ [0-9a-f]{40} ]]; then \ 24 | echo "Error discovering current production deployed HASH" ;\ 25 | exit 1 ;\ 26 | fi ;\ 27 | delete=false ;\ 28 | for bundle_path in $$(find bundles-$(1) -mindepth 2 -maxdepth 2 -type d | grep -v .git | sort -V); do \ 29 | if [[ "$${delete}" == false ]]; then \ 30 | bundle=$$(echo $$bundle_path | cut -d / -f 3-) ;\ 31 | version_hash=$$(echo $$bundle | cut -d - -f 2) ;\ 32 | if [[ $(OPERATOR_VERSION) == "$${version_hash}"* ]]; then \ 33 | delete=true ;\ 34 | fi ;\ 35 | else \ 36 | \rm -rf "$${bundle_path}" ;\ 37 | removed_versions="$$bundle $$removed_versions" ;\ 38 | fi ;\ 39 | done ;\ 40 | fi ;\ 41 | previous_version=$$(find bundles-$(1) -mindepth 2 -maxdepth 2 -type d | grep -v .git | sort -V | tail -n 1| cut -d / -f 3-) ;\ 42 | if [[ -z $$previous_version ]]; then \ 43 | previous_version=__undefined__ ;\ 44 | else \ 45 | previous_version="$(OPERATOR_NAME).v$${previous_version}" ;\ 46 | fi ;\ 47 | python $(7) bundles-$(1)/$(OPERATOR_NAME) $(OPERATOR_NAME) $(OPERATOR_NAMESPACE) $(OPERATOR_VERSION) $(OPERATOR_IMAGE_URI) $(1) true $$previous_version ;\ 48 | new_version=$$(find bundles-$(1) -mindepth 2 -maxdepth 2 -type d | grep -v .git | sort -V | tail -n 1 | cut -d / -f 3-) 
;\ 49 | if [[ $(OPERATOR_NAME).v$${new_version} == $$previous_version ]]; then \ 50 | echo "Already built this, so no need to continue" ;\ 51 | exit 0 ;\ 52 | fi ;\ 53 | sed -e "s/!CHANNEL!/$(1)/g" \ 54 | -e "s/!OPERATOR_NAME!/$(OPERATOR_NAME)/g" \ 55 | -e "s/!VERSION!/$${new_version}/g" \ 56 | build/templates/package.yaml.tmpl > bundles-$(1)/$(OPERATOR_NAME)/$(OPERATOR_NAME).package.yaml ;\ 57 | cd bundles-$(1) ;\ 58 | git add . ;\ 59 | git commit -m "add version $(COMMIT_NUMBER)-$(CURRENT_COMMIT)" -m "replaces: $$previous_version" -m "removed versions: $$removed_versions" ;\ 60 | git push origin $(1) ;\ 61 | cd .. ;\ 62 | $(CONTAINER_ENGINE) build \ 63 | -f build/Dockerfile.catalog_registry \ 64 | --build-arg=SRC_BUNDLES=$$(find bundles-$(1) -mindepth 1 -maxdepth 1 -type d | grep -v .git) \ 65 | -t quay.io/$(8)/$(OPERATOR_NAME)-registry:$(1)-latest \ 66 | . ;\ 67 | skopeo copy --dest-creds $$QUAY_USER:$$QUAY_TOKEN \ 68 | "docker-daemon:quay.io/$(8)/$(OPERATOR_NAME)-registry:$(1)-latest" \ 69 | "docker://quay.io/$(8)/$(OPERATOR_NAME)-registry:$(1)-latest" ;\ 70 | skopeo copy --dest-creds $$QUAY_USER:$$QUAY_TOKEN \ 71 | "docker-daemon:quay.io/$(8)/$(OPERATOR_NAME)-registry:$(1)-latest" \ 72 | "docker://quay.io/$(8)/$(OPERATOR_NAME)-registry:$(1)-$(CURRENT_COMMIT)" 73 | endef 74 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-report-release: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | source $REPO_ROOT/boilerplate/_lib/release.sh 6 | 7 | usage() { 8 | cat < $TMPD/$f 46 | echo $TMPD/$f 47 | return 48 | fi 49 | done 50 | } 51 | 52 | ## expected_prow_config ORG PROJ BRANCH 53 | # 54 | # Prints to stdout the expected prow configuration for the specified 55 | # ORG/PROJ. 
56 | expected_prow_config() { 57 | local org=$1 58 | local consumer_name=$2 59 | local branch=$3 60 | # TODO: DRY this with what's in prow-config. 61 | # Do it by making it a template in the convention dir. 62 | cat < Generate Encrypted Password. 50 | # Even if you're not using quay, the pipeline expects these variables to 51 | # be named QUAY_* 52 | export QUAY_USER= 53 | export QUAY_TOKEN= 54 | 55 | # Tell the scripts where to find your fork of the SaaS bundle repository. 56 | # Except for the authentication part, this should correspond to what you see in the 57 | # https "clone" button in your fork. 58 | # Generate an access token via Settings => Access Tokens. Enable `write_repository`. 59 | # - {gitlab-user} is your username in gitlab 60 | # - {gitlab-token} is the authentication token you generated above 61 | # - {operator} is the name of the consumer repository, e.g. `deadmanssnitch-operator` 62 | export GIT_PATH=https://{gitlab-user}:{gitlab-token}@gitlab.cee.redhat.com/{gitlab-user}/saas-{operator}-bundle.git 63 | ``` 64 | 65 | ## Execute 66 | At this point you should be able to run 67 | ``` 68 | make build-push 69 | ``` 70 | 71 | This will create the following artifacts if it succeeds 72 | (`{hash}` is the 7-digit SHA of the current git commit in the repository under test): 73 | - Operator image in your personal operator repository, tagged `v{major}.{minor}.{commit-count}-{hash}` (e.g. `v0.1.228-e0b6129`) and `latest` 74 | - Two catalog images in your personal registry repository: 75 | - One image tagged `staging-{hash}` and `staging-latest` 76 | - The other tagged `production-{hash}` and `production-latest` 77 | - Two commits in your fork of the SaaS bundle repository: 78 | - One in the `staging` branch 79 | - The other in the `production` branch 80 | These are also present locally in a `saas-{operator-name}-bundle` subdirectory of your operator repository clone. 81 | You can inspect the artifacts therein to make sure e.g. 
the CSV was generated correctly. 82 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $CONVENTION_ROOT/_lib/common.sh 6 | 7 | # No PRE 8 | [[ "$1" == "PRE" ]] && exit 0 9 | 10 | # Expect POST 11 | [[ "$1" == "POST" ]] || err "Got a parameter I don't understand: '$1'. Did the infrastructure change?" 12 | 13 | REPO_ROOT=$(git rev-parse --show-toplevel) 14 | OPERATOR_NAME=$(sed -n 's/.*OperatorName .*=.*"\([^"]*\)".*/\1/p' "${REPO_ROOT}/config/config.go") 15 | E2E_SUITE_DIRECTORY=$REPO_ROOT/test/e2e 16 | 17 | # Update operator name in templates 18 | OPERATOR_UNDERSCORE_NAME=${OPERATOR_NAME//-/_} 19 | OPERATOR_PROPER_NAME=$(echo "$OPERATOR_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++){ $i=toupper(substr($i,1,1)) substr($i,2) }}1') 20 | OPERATOR_NAME_CAMEL_CASE=${OPERATOR_PROPER_NAME// /} 21 | 22 | mkdir -p "${E2E_SUITE_DIRECTORY}" 23 | 24 | E2E_SUITE_BUILDER_IMAGE=registry.ci.openshift.org/openshift/release:rhel-9-release-golang-1.24-openshift-4.21 25 | if [[ -n ${KONFLUX_BUILDS} ]]; then 26 | E2E_SUITE_BUILDER_IMAGE="brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_9_1.24" 27 | fi 28 | 29 | echo "syncing ${E2E_SUITE_DIRECTORY}/Dockerfile" 30 | tee "${E2E_SUITE_DIRECTORY}/Dockerfile" < /(path-to)/kubeconfig 101 | 102 | 5. 
Run test suite using 103 | 104 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 105 | EOF 106 | 107 | sed -e "s/\${OPERATOR_NAME}/${OPERATOR_NAME}/" $(dirname $0)/e2e-template.yml >"${E2E_SUITE_DIRECTORY}/e2e-template.yml" 108 | 109 | # todo: remove after file is renamed in ALL consumer repos 110 | if [ -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" ]; then 111 | rm -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" 112 | fi 113 | -------------------------------------------------------------------------------- /boilerplate/_lib/release.sh: -------------------------------------------------------------------------------- 1 | # Helpers and variables for dealing with openshift/release 2 | 3 | # NOTE: This library is sourced from user-run scripts. It should not be 4 | # sourced in CI, as it relies on git config that's not necessarily 5 | # present there. 6 | 7 | RELEASE_REPO=openshift/release 8 | 9 | ## Information about the boilerplate consumer 10 | # E.g. "openshift/my-wizbang-operator" 11 | CONSUMER=$(repo_name .) 12 | [[ -z "$CONSUMER" ]] && err " 13 | Failed to determine current repository name" 14 | # 15 | # E.g. "openshift" 16 | CONSUMER_ORG=${CONSUMER%/*} 17 | [[ -z "$CONSUMER_ORG" ]] && err " 18 | Failed to determine consumer org" 19 | # 20 | # E.g. "my-wizbang-operator" 21 | CONSUMER_NAME=${CONSUMER#*/} 22 | [[ -z "$CONSUMER_NAME" ]] && err " 23 | Failed to determine consumer name" 24 | # 25 | # E.g. 
"master" 26 | # This will produce something like refs/remotes/origin/master 27 | DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/upstream/HEAD 2>/dev/null || git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null || echo defaulting/to/master) 28 | # Strip off refs/remotes/{upstream|origin}/ 29 | DEFAULT_BRANCH=${DEFAULT_BRANCH##*/} 30 | [[ -z "$DEFAULT_BRANCH" ]] && err " 31 | Failed to determine default branch name" 32 | 33 | ## release_process_args "$@" 34 | # 35 | # This is for use by commands expecting one optional argument which is 36 | # the file system path to a clone of the $RELEASE_REPO. 37 | # 38 | # Will invoke `usage` -- which must be defined by the caller -- if 39 | # the wrong number of arguments are received, or if the single argument 40 | # is `help` or a flag. 41 | # 42 | # If exactly one argument is specified and it is valid, it is assigned 43 | # to the global RELEASE_CLONE variable. 44 | release_process_args() { 45 | if [[ $# -eq 1 ]]; then 46 | # Special cases for usage queries 47 | if [[ "$1" == '-'* ]] || [[ "$1" == help ]]; then 48 | usage 49 | fi 50 | 51 | [[ -d $1 ]] || err " 52 | $1: Not a directory." 53 | 54 | [[ $(repo_name $1) == "$RELEASE_REPO" ]] || err " 55 | $1 is not a clone of $RELEASE_REPO; or its 'origin' remote is not set properly." 56 | 57 | # Got a usable clone of openshift/release 58 | RELEASE_CLONE="$1" 59 | 60 | elif [[ $# -ne 0 ]]; then 61 | usage 62 | fi 63 | } 64 | 65 | ## release_validate_invocation 66 | # 67 | # Make sure we were called from a reasonable place, that being: 68 | # - A boilerplate consumer 69 | # - ...that's actually subscribed to a convention 70 | # - ...containing the script being invoked 71 | release_validate_invocation() { 72 | # Make sure we were invoked from a boilerplate consumer. 73 | [[ -z "$CONVENTION_NAME" ]] && err " 74 | $cmd must be invoked from a consumer of an appropriate convention. Where did you get this script from?" 
75 | # Or at least not from boilerplate itself 76 | [[ "$CONSUMER" == "openshift/boilerplate" ]] && err " 77 | $cmd must be invoked from a boilerplate consumer, not from boilerplate itself." 78 | 79 | [[ -s $CONVENTION_ROOT/_data/last-boilerplate-commit ]] || err " 80 | $cmd must be invoked from a boilerplate consumer!" 81 | 82 | grep -E -q "^$CONVENTION_NAME(\s.*)?$" $CONVENTION_ROOT/update.cfg || err " 83 | $CONSUMER is not subscribed to $CONVENTION_NAME!" 84 | } 85 | 86 | ## release_prep_clone 87 | # 88 | # If $RELEASE_CLONE is already set: 89 | # - It should represent a directory containing a clean checkout of the 90 | # release repository; otherwise we error. 91 | # - We checkout and pull master. 92 | # Otherwise: 93 | # - We clone the release repo to a temporary directory. 94 | # - We set the $RELEASE_CLONE global variable to point to that 95 | # directory. 96 | release_prep_clone() { 97 | # If a release repo clone wasn't specified, create one 98 | if [[ -z "$RELEASE_CLONE" ]]; then 99 | RELEASE_CLONE=$(mktemp -dt openshift_release_XXXXXXX) 100 | git clone --depth=1 git@github.com:${RELEASE_REPO}.git $RELEASE_CLONE 101 | else 102 | [[ -z "$(git -C $RELEASE_CLONE status --porcelain)" ]] || err " 103 | Your release clone must start clean." 104 | # These will blow up if it's misconfigured 105 | git -C $RELEASE_CLONE checkout master 106 | git -C $RELEASE_CLONE pull 107 | fi 108 | } 109 | 110 | ## release_done_msg BRANCH 111 | # 112 | # Print exit instructions for submitting the release PR. 113 | # BRANCH is a suggested branch name. 
114 | release_done_msg() { 115 | echo 116 | git status 117 | 118 | cat <&2; exit 1; } 8 | 9 | while getopts "o:c:n:H:pr:" option; do 10 | case "${option}" in 11 | c) 12 | operator_channel=${OPTARG} 13 | ;; 14 | H) 15 | operator_commit_hash=${OPTARG} 16 | ;; 17 | n) 18 | operator_commit_number=${OPTARG} 19 | ;; 20 | o) 21 | operator_name=${OPTARG} 22 | ;; 23 | p) 24 | push_catalog=true 25 | ;; 26 | r) 27 | # NOTE: This is the URL without the tag/digest 28 | registry_image=${OPTARG} 29 | ;; 30 | *) 31 | usage 32 | esac 33 | done 34 | 35 | # Checking parameters 36 | check_mandatory_params operator_channel operator_name operator_commit_hash operator_commit_number registry_image 37 | 38 | # Calculate previous version 39 | SAAS_OPERATOR_DIR="saas-${operator_name}-bundle" 40 | BUNDLE_DIR="${SAAS_OPERATOR_DIR}/${operator_name}" 41 | OPERATOR_NEW_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 1) 42 | OPERATOR_PREV_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 2 | head -n 1) 43 | 44 | if [[ "$OPERATOR_NEW_VERSION" == "$OPERATOR_PREV_VERSION" ]]; then 45 | echo "New version and previous version are identical. Exiting." 46 | exit 1 47 | fi 48 | 49 | # Get container engine 50 | CONTAINER_ENGINE=$(command -v podman || command -v docker || true) 51 | [[ -n "$CONTAINER_ENGINE" ]] || echo "WARNING: Couldn't find a container engine. Assuming you already in a container, running unit tests." >&2 52 | 53 | # Set SRC container transport based on container engine 54 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]]; then 55 | SRC_CONTAINER_TRANSPORT="containers-storage" 56 | else 57 | SRC_CONTAINER_TRANSPORT="docker-daemon" 58 | fi 59 | 60 | # Checking SAAS_OPERATOR_DIR exist 61 | if [ ! 
-d "${SAAS_OPERATOR_DIR}/.git" ] ; then 62 | echo "${SAAS_OPERATOR_DIR} should exist and be a git repository" 63 | exit 1 64 | fi 65 | 66 | # Read the bundle version we're attempting to publish 67 | # in the OLM catalog from the package yaml 68 | PACKAGE_YAML_PATH="${BUNDLE_DIR}/${operator_name}.package.yaml" 69 | PACKAGE_YAML_VERSION=$(awk '$1 == "currentCSV:" {print $2}' ${PACKAGE_YAML_PATH}) 70 | 71 | # Ensure we're commiting and pushing the version we think we are pushing 72 | # Since we build the bundle in catalog-build.sh this script could be run 73 | # independently and push a version we're not expecting. 74 | # if ! [ "${operator_name}.v${OPERATOR_NEW_VERSION}" = "${PACKAGE_YAML_VERSION}" ]; then 75 | # echo "You are attemping to push a bundle that's pointing to a version of this catalog you are not building" 76 | # echo "You are building version: ${operator_name}.v${OPERATOR_NEW_VERSION}" 77 | # echo "Your local package yaml version is: ${PACKAGE_YAML_VERSION}" 78 | # exit 1 79 | # fi 80 | 81 | # add, commit & push 82 | pushd "${SAAS_OPERATOR_DIR}" 83 | 84 | git add . 85 | 86 | MESSAGE="add version ${operator_commit_number}-${operator_commit_hash} 87 | 88 | replaces ${OPERATOR_PREV_VERSION} 89 | removed versions: ${REMOVED_VERSIONS}" 90 | 91 | git commit -m "${MESSAGE}" 92 | git push origin HEAD 93 | 94 | if [ $? -ne 0 ] ; then 95 | echo "git push failed, exiting..." 96 | exit 1 97 | fi 98 | 99 | popd 100 | 101 | if [ "$push_catalog" = true ] ; then 102 | # push image 103 | if [[ "${RELEASE_BRANCHED_BUILDS}" ]]; then 104 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 105 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:v${OPERATOR_NEW_VERSION}" \ 106 | "docker://${registry_image}:v${OPERATOR_NEW_VERSION}" 107 | 108 | if [ $? -ne 0 ] ; then 109 | echo "skopeo push of ${registry_image}:v${OPERATOR_NEW_VERSION}-latest failed, exiting..." 
110 | exit 1 111 | fi 112 | 113 | exit 0 114 | fi 115 | 116 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 117 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:${operator_channel}-latest" \ 118 | "docker://${registry_image}:${operator_channel}-latest" 119 | 120 | if [ $? -ne 0 ] ; then 121 | echo "skopeo push of ${registry_image}:${operator_channel}-latest failed, exiting..." 122 | exit 1 123 | fi 124 | 125 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 126 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:${operator_channel}-latest" \ 127 | "docker://${registry_image}:${operator_channel}-${operator_commit_hash}" 128 | 129 | if [ $? -ne 0 ] ; then 130 | echo "skopeo push of ${registry_image}:${operator_channel}-${operator_commit_hash} failed, exiting..." 131 | exit 1 132 | fi 133 | fi 134 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber.sh: -------------------------------------------------------------------------------- 1 | # Helpers and variables for subscriber automation 2 | # 3 | # Source this file from subscriber[-*]. 4 | # 5 | # If your command has subcommands, define SUBCOMMANDS as a map of 6 | # [subcmd_name]='Description one-liner' *before* sourcing this library 7 | # and it will parse the command line up to that point for you, setting 8 | # the SUBCOMMAND variable and leaving everything else in $@. No explicit 9 | # usage function is necessary. 10 | # 11 | # Otherwise, define your usage() function *before* sourcing this library 12 | # and it will handle variants of [-[-]]h[elp] for you. 13 | 14 | CMD=${SOURCER##*/} 15 | 16 | _subcommand_usage() { 17 | echo "Usage: $CMD SUBCOMMAND ..." 18 | for subcommand in "${!SUBCOMMANDS[@]}"; do 19 | echo 20 | echo "===========" 21 | echo "$CMD $subcommand" 22 | echo " ${SUBCOMMANDS[$subcommand]}" 23 | done 24 | exit -1 25 | } 26 | 27 | # Regex for help, -h, -help, --help, etc. 28 | # NOTE: This will match a raw 'h'. 
That's probably okay, since if 29 | # there's a conflict, 'h' would be ambiguous anyway. 30 | _helpre='^-*h(elp)?$' 31 | 32 | # Subcommand processing 33 | if [[ ${#SUBCOMMANDS[@]} -ne 0 ]]; then 34 | 35 | # No subcommand specified 36 | [[ $# -eq 0 ]] && _subcommand_usage 37 | 38 | subcmd=$1 39 | shift 40 | 41 | [[ "$subcmd" =~ $_helpre ]] && _subcommand_usage 42 | 43 | # Allow unique prefixes 44 | SUBCOMMAND= 45 | for key in "${!SUBCOMMANDS[@]}"; do 46 | if [[ $key == "$subcmd"* ]]; then 47 | # If SUBCOMMAND is already set, this is an ambiguous prefix. 48 | if [[ -n "$SUBCOMMAND" ]]; then 49 | err "Ambiguous subcommand prefix: '$subcmd' matches (at least): ['$SUBCOMMAND', '$key']" 50 | fi 51 | SUBCOMMAND=$key 52 | fi 53 | done 54 | [[ -n "$SUBCOMMAND" ]] || err "Unknown subcommand '$subcmd'. Try 'help' for usage." 55 | 56 | # We got a valid, unique subcommand. Run the helper with the remaining CLI args. 57 | exec $HERE/$CMD-$SUBCOMMAND "$@" 58 | fi 59 | 60 | [[ "$1" =~ $_helpre ]] && usage 61 | 62 | SUBSCRIBERS_FILE=$REPO_ROOT/subscribers.yaml 63 | 64 | ## subscriber_list FILTER 65 | # 66 | # Prints a list of subscribers registered in the $SUBSCRIBERS_FILE. 67 | # 68 | # FILTER: 69 | # all: Prints all subscribers 70 | # onboarded: Prints only onboarded subscribers 71 | subscriber_list() { 72 | case $1 in 73 | all) yq '.subscribers[] | .name' $SUBSCRIBERS_FILE;; 74 | # TODO: Right now subscribers are only "manual". 75 | onboarded) yq '.subscribers[] | select(.conventions[].status == "manual") | .name' $SUBSCRIBERS_FILE;; 76 | esac 77 | } 78 | 79 | ## last_bp_commit ORG/PROJ 80 | # 81 | # Prints the commit hash of the specified repository's boilerplate 82 | # level, or the empty string if the repository is not onboarded. 83 | # 84 | # ORG/PROJ: github organization and project name, e.g. 85 | # "openshift/my-wizbang-operator". 
86 | last_bp_commit() { 87 | local repo=$1 88 | local lbc 89 | for default_branch in master main; do 90 | lbc=$(curl -s https://raw.githubusercontent.com/$repo/$default_branch/boilerplate/_data/last-boilerplate-commit) 91 | if [[ "$lbc" != "404: Not Found" ]]; then 92 | echo $lbc | cut -c 1-7 93 | return 94 | fi 95 | done 96 | } 97 | 98 | ## commits_behind_bp_master HASH 99 | # 100 | # Prints how many merge commits behind boilerplate master HASH is. If 101 | # HASH is empty/unspecified, prints the total number of merge commits in 102 | # the boilerplate repo. 103 | commits_behind_bp_master() { 104 | local hash=$1 105 | local range=master 106 | if [[ -n "$hash" ]]; then 107 | range=$hash..master 108 | fi 109 | git rev-list --count --merges $range 110 | } 111 | 112 | ## subscriber_args SUBSCRIBER ... 113 | # 114 | # Processes arguments as a list of onboarded subscribers of the form 115 | # "org/name" (e.g. "openshift/deadmanssnitch-operator"); or the special 116 | # keyword "ALL". 117 | # 118 | # Outputs to stdout a space-separated list of subscribers. If "ALL" was 119 | # specified, these are all onboarded subscribers. 120 | # 121 | # Errors if: 122 | # - "ALL" is specified along with one or more explicit subscriber names. 123 | # - Any specified subscriber is nonexistent or not listed as onboarded 124 | # in the config.
125 | subscriber_args() { 126 | local -A to_process 127 | local ALL=0 128 | local subscriber 129 | local a 130 | 131 | if [[ $# -eq 1 ]] && [[ "$1" == ALL ]]; then 132 | ALL=1 133 | shift 134 | fi 135 | for subscriber in $(subscriber_list onboarded); do 136 | to_process[$subscriber]=$ALL 137 | done 138 | 139 | # Parse specified subscribers 140 | for a in "$@"; do 141 | [[ $a == ALL ]] && err "Can't specify ALL with explicit subscribers" 142 | 143 | [[ -n "${to_process[$a]}" ]] || err "Not an onboarded subscriber: '$a'" 144 | if [[ "${to_process[$a]}" -eq 1 ]]; then 145 | echo "Ignoring duplicate: '$a'" >&2 146 | continue 147 | fi 148 | to_process[$a]=1 149 | done 150 | 151 | for subscriber in "${!to_process[@]}"; do 152 | [[ "${to_process[$subscriber]}" -eq 1 ]] || continue 153 | echo -n "${subscriber} " 154 | done 155 | } 156 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $CONVENTION_ROOT/_lib/common.sh 6 | 7 | # No PRE 8 | [[ "$1" == "PRE" ]] && exit 0 9 | 10 | # Expect POST 11 | [[ "$1" == "POST" ]] || err "Got a parameter I don't understand: '$1'. Did the infrastructure change?" 12 | 13 | # Add codecov configuration 14 | echo "Copying .codecov.yml to your repository root." 15 | cp ${HERE}/.codecov.yml $REPO_ROOT 16 | 17 | # Add OWNERS_ALIASES to $REPO_ROOT 18 | echo "Copying OWNERS_ALIASES to your repository root." 19 | cp -L ${HERE}/OWNERS_ALIASES $REPO_ROOT 20 | 21 | # Add CICD owners to .tekton if exists 22 | if [[ -d "${REPO_ROOT}/.tekton/" ]]; then 23 | echo "Adding Konflux subdirectory OWNERS file to .tekton/" 24 | cat >"${REPO_ROOT}/.tekton/OWNERS" </dev/null; then 41 | echo "Wrapping existing dependabot.yml (which matches boilerplate) with boilerplate-managed markers..." 
42 | mv "$TARGET_FILE" "${TARGET_FILE}.bak" 43 | { 44 | echo "# BEGIN boilerplate-managed" 45 | cat "${TARGET_FILE}.bak" 46 | echo "# END boilerplate-managed" 47 | } > "$TARGET_FILE" 48 | rm -f "${TARGET_FILE}.bak" 49 | else 50 | echo "[WARNING] dependabot.yml exists and differs from boilerplate template but has no boilerplate-managed markers." 51 | echo "[WARNING] Please review manually to avoid config duplication." 52 | fi 53 | else 54 | echo "Copying boilerplate-managed dependabot.yml" 55 | cp "$BOILERPLATE_FILE" "$TARGET_FILE" 56 | fi 57 | 58 | # Add olm-registry Dockerfile 59 | mkdir -p $REPO_ROOT/build 60 | echo "Copying Dockerfile.olm-registry to build/Dockerfile.olm-registry" 61 | cp ${HERE}/Dockerfile.olm-registry ${REPO_ROOT}/build/Dockerfile.olm-registry 62 | # if the gitignore file exists, remove the olm-registry line 63 | if [[ -f ${REPO_ROOT}/.gitignore ]]; then 64 | ${SED?} -i "/Dockerfile.olm-registry/d" ${REPO_ROOT}/.gitignore 65 | fi 66 | 67 | OPERATOR_NAME=$(sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' "${REPO_ROOT}/config/config.go") 68 | 69 | if [[ ! -f ${REPO_ROOT}/config/metadata/additional-labels.txt ]]; then 70 | mkdir -p ${REPO_ROOT}/config/metadata 71 | cat >${REPO_ROOT}/config/metadata/additional-labels.txt <$REPO_ROOT/.ci-operator.yaml 112 | 113 | # Check for pipeline files in .tekton directory and centralize them 114 | TEKTON_DIR="${REPO_ROOT}/.tekton" 115 | if [ -d "$TEKTON_DIR" ]; then 116 | for pipeline_file in "$TEKTON_DIR"/*.yaml; do 117 | if [ -f "$pipeline_file" ] && grep -q buildah "${pipeline_file}" && ! grep -q "pipelineRef:" "$pipeline_file"; then 118 | echo "Centralizing pipeline: $(basename "$pipeline_file")" 119 | python3 "${HERE}/migrate_build_pipeline.py" "${pipeline_file}" 120 | fi 121 | done 122 | fi 123 | 124 | cat <= maxCSVFailures { 70 | // If maxCSVFailures has been exceeded, handle errors with Expect()... 
71 | csvErrCounter++ 72 | GinkgoLogr.Error(err, fmt.Sprintf("CSV error counter: %d, tolerated errors: %d", csvErrCounter, maxCSVFailures)) 73 | Expect(err).NotTo(HaveOccurred(), "Failed to retrieve CSV from namespace %s", namespace) 74 | Expect(csvList.Items).Should(HaveLen(1)) 75 | } 76 | if err != nil { 77 | GinkgoLogr.Error(err, fmt.Sprintf("Err, fetching CSV for NS:'%s' LABEL:'%s'", namespace, labelSelector)) 78 | csvErrCounter++ 79 | return false 80 | } 81 | if csvList == nil { 82 | GinkgoLogr.Error(nil, fmt.Sprintf("Err, nil CSV list fetching CSV for NS:'%s' LABEL:'%s'", namespace, labelSelector)) 83 | csvErrCounter++ 84 | return false 85 | } 86 | if len(csvList.Items) != 1 { 87 | GinkgoLogr.Error(nil, fmt.Sprintf("Err, expected 1 CSV for NS:'%s' LABEL:'%s'. Got %d", namespace, labelSelector, len(csvList.Items))) 88 | csvErrCounter++ 89 | return false 90 | } 91 | statusPhase, _, _ := unstructured.NestedFieldCopy(csvList.Items[0].Object, "status", "phase") 92 | if statusPhase == "Succeeded" { 93 | GinkgoLogr.Info("csv phase", "phase", statusPhase) 94 | return true 95 | } 96 | GinkgoLogr.Info("csv phase", "phase", statusPhase) 97 | return false 98 | }, ctx).WithTimeout(timeoutDuration).WithPolling(pollingDuration).Should(BeTrue(), "CSV %s should exist and have Succeeded status", operatorName) 99 | }) 100 | 101 | It("service accounts exist", func(ctx context.Context) { 102 | for _, serviceAccount := range serviceAccounts { 103 | err := client.Get(ctx, serviceAccount, namespace, &v1.ServiceAccount{}) 104 | Expect(err).ShouldNot(HaveOccurred(), "Service account %s not found", serviceAccount) 105 | } 106 | }) 107 | 108 | It("deployment exists", func(ctx context.Context) { 109 | err := wait.For(conditions.New(client).DeploymentAvailable(operatorName, namespace)) 110 | Expect(err).ShouldNot(HaveOccurred(), "Deployment %s not available", operatorName) 111 | }) 112 | 113 | It("roles exist", func(ctx context.Context) { 114 | var roles rbacv1.RoleList 115 | err := 
client.WithNamespace(namespace).List(ctx, &roles, resources.WithLabelSelector(labelSelector)) 116 | Expect(err).ShouldNot(HaveOccurred(), "Failed to get roles") 117 | Expect(roles.Items).ShouldNot(BeZero(), "no roles found") 118 | }) 119 | 120 | It("role bindings exist", func(ctx context.Context) { 121 | var roleBindings rbacv1.RoleBindingList 122 | err := client.WithNamespace(namespace).List(ctx, &roleBindings, resources.WithLabelSelector(labelSelector)) 123 | Expect(err).ShouldNot(HaveOccurred(), "Failed to get role bindings") 124 | Expect(roleBindings.Items).ShouldNot(BeZero(), "no rolebindings found") 125 | }) 126 | 127 | It("cluster roles exist", func(ctx context.Context) { 128 | var clusterRoles rbacv1.ClusterRoleList 129 | err := client.WithNamespace(namespace).List(ctx, &clusterRoles, resources.WithLabelSelector(labelSelector)) 130 | Expect(err).ShouldNot(HaveOccurred(), "Failed to get cluster roles") 131 | Expect(clusterRoles.Items).ShouldNot(BeZero(), "no clusterroles found") 132 | }) 133 | 134 | It("cluster role bindings exist", func(ctx context.Context) { 135 | var clusterRoleBindings rbacv1.ClusterRoleBindingList 136 | err := client.List(ctx, &clusterRoleBindings, resources.WithLabelSelector(labelSelector)) 137 | Expect(err).ShouldNot(HaveOccurred(), "Failed to get cluster role bindings") 138 | Expect(clusterRoleBindings.Items).ShouldNot(BeZero(), "no clusterrolebindingss found") 139 | }) 140 | 141 | It("config map exists", func(ctx context.Context) { 142 | err := client.Get(ctx, configMapLockFile, namespace, &v1.ConfigMap{}) 143 | Expect(err).ShouldNot(HaveOccurred(), "Failed to get config map %s", configMapLockFile) 144 | }) 145 | 146 | It("secrets exist", func(ctx context.Context) { 147 | for _, secret := range secrets { 148 | err := client.Get(ctx, secret, namespace, &v1.Secret{}) 149 | Expect(err).ShouldNot(HaveOccurred(), "Secret %s not found", secret) 150 | } 151 | }) 152 | 153 | PIt("can be upgraded", func(ctx context.Context) { 154 | 
log.SetLogger(GinkgoLogr) 155 | k8sClient, err := openshift.New(ginkgo.GinkgoLogr) 156 | Expect(err).ShouldNot(HaveOccurred(), "unable to setup k8s client") 157 | 158 | ginkgo.By("forcing operator upgrade") 159 | err = k8sClient.UpgradeOperator(ctx, operatorName, namespace) 160 | Expect(err).NotTo(HaveOccurred(), "operator upgrade failed") 161 | }) 162 | }) 163 | -------------------------------------------------------------------------------- /boilerplate/_lib/boilerplate-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | if [ "$BOILERPLATE_SET_X" ]; then 5 | set -x 6 | fi 7 | 8 | REPO_ROOT=$(git rev-parse --show-toplevel) 9 | source $REPO_ROOT/boilerplate/_lib/common.sh 10 | 11 | tmpd=$(mktemp -d) 12 | trap "rm -fr $tmpd" EXIT 13 | git_status=$tmpd/git_status 14 | bp_log=$tmpd/bp_git_log 15 | bp_clone=$tmpd/bp_git_clone 16 | convention_status=$tmpd/convention_status 17 | commit_message=$tmpd/commit_msg 18 | 19 | # Variables to keep track of what's happening in this commit. Empty 20 | # means we're not doing whatever it is. 21 | #### 22 | # - Bootstrapping: bringing boilerplate into the repo for the first 23 | # time. Nonempty if bootstrapping. 24 | bootstrap= 25 | #### 26 | # - If we were already bootstrapped, and boilerplate-update brought in a 27 | # newer boilerplate commit, we'll put "{from_hash}...{to_hash}" here. 28 | # This should be mutually exclusive with `bootstrap`. 29 | bp_commit_change= 30 | #### 31 | # - Changes in conventions. This is a file containing one line per 32 | # convention indicating what was done in this commit with respect to 33 | # that convention: "Subscribe", "Update", or "No change". (TODO: 34 | # "Unsubscribe".) The file is only empty if update.cfg is 35 | # (substantively) empty. 
36 | convention_statuses=$tmpd/convention_statuses 37 | >$convention_statuses 38 | #### 39 | 40 | git status --porcelain > $git_status 41 | 42 | # Bootstrapping includes adding the boilerplate-update target to the 43 | # Makefile and adding boilerplate/update and boilerplate/update.cfg. We 44 | # won't bother with the former. Since the latter are new files in a new 45 | # directory, `git status` will just show the `boilerplate/` directory as 46 | # untracked. 47 | if grep -q '^?? boilerplate/$' $git_status; then 48 | bootstrap=true 49 | 50 | # This wasn't a bootstrap. We can detect it was an update if the 51 | # last-boilerplate-commit file was changed. 52 | elif grep -q '^ M boilerplate/_data/last-boilerplate-commit$' $git_status; then 53 | # Produce a string of the form {old_hash}...{new_hash} 54 | bp_commit_change=$(git diff boilerplate/_data/last-boilerplate-commit | tail -2 | paste -d/ -s - | sed 's/[+-]//g; s,/,...,') 55 | # Handy URL showing the commits and deltas 56 | bp_compare_url="https://github.com/openshift/boilerplate/compare/$bp_commit_change" 57 | # Generate the commit history for this range. This will go in the commit message. 58 | ( 59 | git clone "${BOILERPLATE_GIT_REPO}" "${bp_clone}" 60 | cd "${bp_clone}" 61 | # Matches promote.sh 62 | git log --no-merges --pretty=format:'commit: %H%nauthor: %an%n%s%n%n%b%n%n' $bp_commit_change > $bp_log 63 | ) 64 | 65 | fi 66 | 67 | # Okay, let's look for convention changes. 68 | # TODO: Handle unsubscribes (not yet handled by the main `update` either). 69 | while read convention junk; do 70 | # TODO: These first few conditions, scrubbing the config file, are 71 | # identical to what's in `update`. It would be lovely to library-ize 72 | # them. However, `update` needs to remain self-contained since it's 73 | # part of the bootstrap process. 
74 | 75 | # Skip comment lines (which can have leading whitespace) 76 | if [[ "$convention" == '#'* ]]; then 77 | continue 78 | fi 79 | # Skip blank or whitespace-only lines 80 | if [[ "$convention" == "" ]]; then 81 | continue 82 | fi 83 | # Lines like 84 | # valid/path other_junk 85 | # are not acceptable, unless `other_junk` is a comment 86 | if [[ "$junk" != "" ]] && [[ "$junk" != '#'* ]]; then 87 | echo "Invalid config! Only one convention is allowed per line. Found '$junk'. Ignoring." 88 | # `update` bails for this. We're being a bit more forgiving. 89 | continue 90 | fi 91 | 92 | dir_path="boilerplate/${convention}" 93 | # Make sure the directory exists 94 | if ! [[ -d "$dir_path" ]]; then 95 | echo "Invalid convention directory: '$convention'." 96 | echo "(Could be because we don't handle unsubscribing yet.)" 97 | echo "Ignoring." 98 | # `update` bails for this. We're being a bit more forgiving. 99 | continue 100 | fi 101 | 102 | # Okay, we have a legit convention. Let's see if the current checkout 103 | # touches it 104 | # (Note that we're reusing the same temp file on each iteration.) 105 | git status --porcelain $dir_path > $convention_status 106 | if ! [[ -s $convention_status ]]; then 107 | # No deltas here. 108 | echo "- $convention: No change" >> $convention_statuses 109 | 110 | elif grep -q -v '^??' $convention_status; then 111 | # If there's anything *other than* untracked, this was an update 112 | echo "- $convention: Update" >> $convention_statuses 113 | 114 | else 115 | # If we get here, everything is '^??' (untracked), meaning this is a 116 | # new subscription. (Or, I suppose, the convention was previously 117 | # empty? We'll call it a new subscription anyway.) 118 | echo "- $convention: Subscribe" >> $convention_statuses 119 | fi 120 | 121 | done < boilerplate/update.cfg 122 | 123 | # Let's make sure *something* boilerplate-related is happening here. 124 | if [[ -z "$bootstrap" ]] && [[ -z "$bp_commit_change" ]] && ! 
grep -v -q "No change" $convention_statuses; then 125 | err "No boilerplate-related activity found in the current checkout!" 126 | fi 127 | 128 | # Okay, we're ready to do this. 129 | # Generate the commit title and branch name indicating the *main* action 130 | # we're taking. This is 'bootstrap' or 'update'; or if we're doing 131 | # neither of those things and only changing config, 'subscribe'. 132 | # => Commit titles will be of one of the following forms: 133 | # "Boilerplate: Bootstrap at {hash}" 134 | # "Boilerplate: Update to {hash}" 135 | # "Boilerplate: Subscribe at {hash}" 136 | # => Branch names will be of the form: 137 | # boilerplate-{bootstrap|update|subscribe}-{N}-{hash} 138 | # where {N} is the number of configured conventions (omitted if zero) 139 | title="Boilerplate:" 140 | branch=boilerplate 141 | if [[ -n "$bootstrap" ]]; then 142 | title="$title Bootstrap at" 143 | branch="$branch-bootstrap" 144 | elif [[ -n "$bp_commit_change" ]]; then 145 | title="$title Update to" 146 | branch="$branch-update" 147 | else 148 | title="$title Subscribe at" 149 | branch="$branch-subscribe" 150 | fi 151 | cur_commit=$(cat boilerplate/_data/last-boilerplate-commit) 152 | title="$title $cur_commit" 153 | echo "$title 154 | " > $commit_message 155 | 156 | if [[ -n "$bootstrap" ]]; then 157 | echo "https://github.com/openshift/boilerplate/commit/$cur_commit 158 | ---" >> $commit_message 159 | fi 160 | 161 | echo "Conventions:" >> $commit_message 162 | if [[ -s $convention_statuses ]]; then 163 | cat $convention_statuses >> $commit_message 164 | # Add the number of conventions to the branch name 165 | branch="$branch-"$(wc -l $convention_statuses | sed 's/ .*//') 166 | else 167 | echo " None." 
>> $commit_message 168 | fi 169 | 170 | branch="$branch-$cur_commit" 171 | 172 | if [[ -n "$bp_commit_change" ]]; then 173 | 174 | echo "--- 175 | $bp_compare_url 176 | " >> $commit_message 177 | cat $bp_log >> $commit_message 178 | 179 | fi 180 | 181 | # TODO: Handle branch name conflict. At the moment, this should really only be 182 | # possible if unsubscribing and subscribing the same number of conventions. 183 | # Since we don't handle unsubscribing (properly), we'll take our chances that 184 | # it "can't" happen for now. 185 | git checkout -b $branch 186 | # We can get away with -A because `update` forces a clean checkout. 187 | git add -A 188 | git commit -F $commit_message 189 | echo "Ready to push branch $branch" 190 | -------------------------------------------------------------------------------- /boilerplate/_lib/common.sh: -------------------------------------------------------------------------------- 1 | err() { 2 | echo "==ERROR== $@" >&2 3 | exit 1 4 | } 5 | 6 | banner() { 7 | echo 8 | echo "==============================" 9 | echo "$@" 10 | echo "==============================" 11 | } 12 | 13 | ## osdk_version BINARY 14 | # 15 | # Print the version of the specified operator-sdk BINARY 16 | osdk_version() { 17 | local osdk=$1 18 | # `operator-sdk version` output looks like 19 | # operator-sdk version: v0.8.2, commit: 28bd2b0d4fd25aa68e15d928ae09d3c18c3b51da 20 | # or 21 | # operator-sdk version: "v0.16.0", commit: "55f1446c5f472e7d8e308dcdf36d0d7fc44fc4fd", go version: "go1.13.8 linux/amd64" 22 | # Peel out the version number, accounting for the optional quotes. 
23 | $osdk version | ${SED?} 's/operator-sdk version: "*\([^,"]*\)"*,.*/\1/' 24 | } 25 | 26 | ## opm_version BINARY 27 | # 28 | # Print the version of the specified opm BINARY 29 | opm_version() { 30 | local opm=$1 31 | # `opm version` output looks like: 32 | # Version: version.Version{OpmVersion:"v1.15.2", GitCommit:"fded0bf", BuildDate:"2020-11-18T14:21:24Z", GoOs:"darwin", GoArch:"amd64"} 33 | $opm version | ${SED?} 's/.*OpmVersion:"//;s/".*//' 34 | } 35 | 36 | ## grpcurl_version BINARY 37 | # 38 | # Print the version of the specified grpcurl BINARY 39 | grpcurl_version() { 40 | local grpcurl=$1 41 | # `grpcurl -version` output looks like: grpcurl 1.7.0 42 | $grpcurl -version 2>&1 | cut -d " " -f 2 43 | } 44 | 45 | ## repo_import REPODIR 46 | # 47 | # Print the qualified org/name of the current repository, e.g. 48 | # "openshift/wizbang-foo-operator". This relies on git remotes being set 49 | # reasonably. 50 | repo_name() { 51 | # Just strip off the first component of the import-ish path 52 | repo_import $1 | ${SED?} 's,^[^/]*/,,' 53 | } 54 | 55 | ## repo_import REPODIR 56 | # 57 | # Print the go import-ish path to the current repository, e.g. 58 | # "github.com/openshift/wizbang-foo-operator". This relies on git 59 | # remotes being set reasonably. 60 | repo_import() { 61 | # Account for remotes which are 62 | # - upstream or origin 63 | # - ssh ("git@host.com:org/name.git") or https ("https://host.com/org/name.git") 64 | (git -C $1 config --get remote.upstream.url || git -C $1 config --get remote.origin.url) | ${SED?} 's,git@\([^:]*\):,\1/,; s,https://,,; s/\.git$//' 65 | } 66 | 67 | ## current_branch REPO 68 | # 69 | # Outputs the name of the current branch in the REPO directory 70 | current_branch() { 71 | ( 72 | cd $1 73 | git rev-parse --abbrev-ref HEAD 74 | ) 75 | } 76 | 77 | ## image_exits_in_repo IMAGE_URI 78 | # 79 | # Checks whether IMAGE_URI -- e.g. quay.io/app-sre/osd-metrics-exporter:abcd123 80 | # -- exists in the remote repository. 
# If so, returns success.
# If the image does not exist, but the query was otherwise successful, returns
# failure.
# If the query fails for any reason, prints an error and *exits* nonzero.
image_exists_in_repo() {
    local image_uri=$1
    local output
    local rc

    local skopeo_stderr=$(mktemp)

    if ! command -v skopeo &>/dev/null; then
        echo "Failed to find the skopeo binary. If you are on Mac: brew install skopeo." >&2
        exit 1
    fi

    output=$(skopeo inspect docker://${image_uri} 2>$skopeo_stderr)
    rc=$?
    # So we can delete the temp file right away...
    stderr=$(cat $skopeo_stderr)
    rm -f $skopeo_stderr
    if [[ $rc -eq 0 ]]; then
        # The image exists. Sanity check the output.
        local digest=$(echo $output | jq -r .Digest)
        if [[ -z "$digest" ]]; then
            echo "Unexpected error: skopeo inspect succeeded, but output contained no .Digest"
            echo "Here's the output:"
            echo "$output"
            echo "...and stderr:"
            echo "$stderr"
            exit 1
        fi
        echo "Image ${image_uri} exists with digest $digest."
        return 0
    elif [[ "$output" == *"manifest unknown"* || "$stderr" == *"manifest unknown"* ]]; then
        # We were able to talk to the repository, but the tag doesn't exist.
        # This is the normal "green field" case.
        echo "Image ${image_uri} does not exist in the repository."
        return 1
    # BUG FIX: the first disjunct previously repeated *"manifest unknown"*
    # (copy-paste from the branch above), so a "deleted or has expired"
    # message on stdout could never reach this branch. Match the intended
    # message in both streams.
    elif [[ "$output" == *"was deleted or has expired"* || "$stderr" == *"was deleted or has expired"* ]]; then
        # This should be rare, but accounts for cases where we had to
        # manually delete an image.
        echo "Image ${image_uri} was deleted from the repository."
        echo "Proceeding as if it never existed."
        return 1
    else
        # Any other error. For example:
        # - "unauthorized: access to the requested resource is not
        # authorized".
This happens not just on auth errors, but if we 130 | # reference a repository that doesn't exist. 131 | # - "no such host". 132 | # - Network or other infrastructure failures. 133 | # In all these cases, we want to bail, because we don't know whether 134 | # the image exists (and we'd likely fail to push it anyway). 135 | echo "Error querying the repository for ${image_uri}:" 136 | echo "stdout: $output" 137 | echo "stderr: $stderr" 138 | exit 1 139 | fi 140 | } 141 | 142 | if [ "$BOILERPLATE_SET_X" ]; then 143 | set -x 144 | fi 145 | 146 | # Only used for error messages 147 | _lib=${BASH_SOURCE##*/} 148 | 149 | # When this lib is sourced (which is what it's designed for), $0 is the 150 | # script that did the sourcing. 151 | SOURCER=$(realpath $0) 152 | [[ -n "$SOURCER" ]] || err "$_lib couldn't discover where it was sourced from" 153 | 154 | HERE=${SOURCER%/*} 155 | [[ -n "$HERE" ]] || err "$_lib failed to discover the dirname of sourcer at $SOURCER" 156 | 157 | REPO_ROOT=$(git rev-parse --show-toplevel) 158 | [[ -n "$REPO_ROOT" ]] || err "$_lib couldn't discover the repo root" 159 | 160 | CONVENTION_ROOT=$REPO_ROOT/boilerplate 161 | [[ -d "$CONVENTION_ROOT" ]] || err "$CONVENTION_ROOT: not a directory" 162 | 163 | # Were we sourced from within a convention? 164 | if [[ "$HERE" == "$CONVENTION_ROOT/"* ]]; then 165 | # Okay, figure out the name of the convention 166 | CONVENTION_NAME=${HERE#$CONVENTION_ROOT/} 167 | # If we got here, we really expected to be able to identify the 168 | # convention name. 169 | [[ -n "$CONVENTION_NAME" ]] || err "$_lib couldn't discover the name of the sourcing convention" 170 | fi 171 | 172 | # Set SED variable 173 | if LANG=C sed --help 2>&1 | grep -q GNU; then 174 | SED="sed" 175 | elif command -v gsed &>/dev/null; then 176 | SED="gsed" 177 | else 178 | echo "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." 
>&2 179 | exit 1 180 | fi 181 | 182 | if [ -z "$BOILERPLATE_GIT_REPO" ]; then 183 | export BOILERPLATE_GIT_REPO=https://github.com/openshift/boilerplate.git 184 | fi 185 | 186 | # Base image repo url 187 | IMAGE_REPO=quay.io/redhat-services-prod/openshift 188 | # The namespace of the ImageStream by which prow will import the image. 189 | IMAGE_NAMESPACE=openshift 190 | IMAGE_NAME=boilerplate 191 | # LATEST_IMAGE_TAG may be set manually or by `update`, in which case 192 | # that's the value we want to use. 193 | if [[ -z "$LATEST_IMAGE_TAG" ]]; then 194 | # (Non-ancient) consumers will have the tag in this file. 195 | if [[ -f ${CONVENTION_ROOT}/_data/backing-image-tag ]]; then 196 | LATEST_IMAGE_TAG=$(cat ${CONVENTION_ROOT}/_data/backing-image-tag) 197 | 198 | # In boilerplate itself, we can discover the latest from git. 199 | elif [[ $(repo_name .) == openshift/boilerplate ]]; then 200 | LATEST_IMAGE_TAG=$(git describe --tags --abbrev=0 --match image-v*) 201 | fi 202 | fi 203 | # The public image location 204 | IMAGE_PULL_PATH=${IMAGE_PULL_PATH:-$IMAGE_REPO/$IMAGE_NAME:$LATEST_IMAGE_TAG} 205 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/README.md: -------------------------------------------------------------------------------- 1 | # Conventions for OSD operators written in Go 2 | 3 | - [Conventions for OSD operators written in Go](#conventions-for-osd-operators-written-in-go) 4 | - [`make` targets and functions.](#make-targets-and-functions) 5 | - [Prow](#prow) 6 | - [Local Testing](#local-testing) 7 | - [app-sre](#app-sre) 8 | - [Code coverage](#code-coverage) 9 | - [Linting and other static analysis with `golangci-lint`](#linting-and-other-static-analysis-with-golangci-lint) 10 | - [Checks on generated code](#checks-on-generated-code) 11 | - [FIPS](#fips-federal-information-processing-standards) 12 | - [Additional deployment support](#additional-deployment-support) 13 | - [OLM 
SkipRange](#olm-skiprange) 14 | 15 | This convention is suitable for both cluster- and hive-deployed operators. 16 | 17 | The following components are included: 18 | 19 | ## `make` targets and functions. 20 | 21 | **Note:** Your repository's main `Makefile` needs to be edited to include the 22 | "nexus makefile include": 23 | 24 | ``` 25 | include boilerplate/generated-includes.mk 26 | ``` 27 | 28 | One of the primary purposes of these `make` targets is to allow you to 29 | standardize your prow and app-sre pipeline configurations using the 30 | following: 31 | 32 | ### Prow 33 | 34 | | Test name / `make` target | Purpose | 35 | | ------------------------- | --------------------------------------------------------------------------------------------------------------- | 36 | | `validate` | Ensure code generation has not been forgotten; and ensure generated and boilerplate code has not been modified. | 37 | | `lint` | Perform static analysis. | 38 | | `test` | "Local" unit and functional testing. | 39 | | `coverage` | [Code coverage](#code-coverage) analysis and reporting. | 40 | 41 | To standardize your prow configuration, you may run: 42 | 43 | ```shell 44 | $ make prow-config 45 | ``` 46 | 47 | If you already have the openshift/release repository cloned locally, you 48 | may specify its path via `$RELEASE_CLONE`: 49 | 50 | ```shell 51 | $ make RELEASE_CLONE=/home/me/github/openshift/release prow-config 52 | ``` 53 | 54 | This will generate a delta configuring prow to: 55 | 56 | - Build your `build/Dockerfile`. 57 | - Run the above targets in presubmit tests. 58 | - Run the `coverage` target in a postsubmit. This is the step that 59 | updates your coverage report in codecov.io. 60 | 61 | #### Local Testing 62 | 63 | You can run these `make` targets locally during development to test your 64 | code changes. However, differences in platforms and environments may 65 | lead to unpredictable results. 
Therefore boilerplate provides a utility 66 | to run targets in a container environment that is designed to be as 67 | similar as possible to CI: 68 | 69 | ```shell 70 | $ make container-{target} 71 | ``` 72 | 73 | or 74 | 75 | ```shell 76 | $ ./boilerplate/_lib/container-make {target} 77 | ``` 78 | 79 | ### app-sre 80 | 81 | The `build-push` target builds and pushes the operator and OLM registry images, 82 | ready to be SaaS-deployed. 83 | By default it is configured to be run from the app-sre jenkins pipelines. 84 | Consult [this doc](app-sre.md) for information on local execution/testing. 85 | 86 | ## Code coverage 87 | 88 | - A `codecov.sh` script, referenced by the `coverage` `make` target, to 89 | run code coverage analysis per [this SOP](https://github.com/openshift/ops-sop/blob/93d100347746ce04ad552591136818f82043c648/services/codecov.md). 90 | 91 | - A `.codecov.yml` configuration file for 92 | [codecov.io](https://docs.codecov.io/docs/codecov-yaml). Note that 93 | this is copied into the repository root, because that's 94 | [where codecov.io expects it](https://docs.codecov.io/docs/codecov-yaml#can-i-name-the-file-codecovyml). 95 | 96 | ## Linting and other static analysis with `golangci-lint` 97 | 98 | - A `go-check` `make` target, which 99 | - ensures the proper version of `golangci-lint` is installed, and 100 | - runs it against 101 | - a `golangci.yml` config. 102 | 103 | ## Checks on generated code 104 | 105 | The convention embeds default checks to ensure generated code generation is current, committed, and unaltered. 106 | To trigger the check, you can use `make generate-check` provided your Makefile properly includes the boilerplate-generated include `boilerplate/generated-includes.mk`. 
107 | 108 | Checks consist of: 109 | 110 | - Checking all files are committed to ensure a safe point to revert to in case of error 111 | - Running the `make generate` command (see below) to regenerate the needed code 112 | - Checking if this results in any new uncommitted files in the git project or if all is clean. 113 | 114 | `make generate` does the following: 115 | 116 | - generate crds and deepcopy via controller-gen. This is a no-op if your 117 | operator has no APIs. 118 | - `openapi-gen`. This is a no-op if your operator has no APIs. 119 | - `go generate`. This is a no-op if you have no `//go:generate` 120 | directives in your code. 121 | 122 | ## FIPS (Federal Information Processing Standards) 123 | 124 | To enable FIPS in your build there is a `make ensure-fips` target. 125 | 126 | Add `FIPS_ENABLED=true` to your repos Makefile. Please ensure that this variable is added **before** including boilerplate Makefiles. 127 | 128 | e.g. 129 | 130 | ```.mk 131 | FIPS_ENABLED=true 132 | 133 | include boilerplate/generated-includes.mk 134 | ``` 135 | 136 | `ensure-fips` will add a [fips.go](./fips.go) file in the same directory as the `main.go` file. (Please commit this file as normal) 137 | 138 | `fips.go` will import the necessary packages to restrict all TLS configuration to FIPS-approved settings. 139 | 140 | With `FIPS_ENABLED=true`, `ensure-fips` is always run before `make go-build` 141 | 142 | ## Additional deployment support 143 | 144 | - The convention currently supports a maximum of two deployments. i.e. The operator deployment itself plus an optional additional deployment. 145 | - If an additional deployment image has to be built and appended to the CSV as part of the build process, then the consumer needs to: 146 | - Specify `SupplementaryImage` which is the deployment name in the consuming repository's `config/config.go`. 
147 | - Define the image to be built as `ADDITIONAL_IMAGE_SPECS` in the consuming repository's Makefile; Boilerplate later parses this image as part of the build process; [ref](https://github.com/openshift/boilerplate/blob/master/boilerplate/openshift/golang-osd-operator/standard.mk#L56). 148 | 149 | e.g. 150 | 151 | ```.mk 152 | # Additional Deployment Image 153 | define ADDITIONAL_IMAGE_SPECS 154 | build/Dockerfile.webhook $(SUPPLEMENTARY_IMAGE_URI) 155 | endef 156 | ``` 157 | - Ensure the CSV template of the consuming repository has the additional deployment name. 158 | 159 | ## OLM SkipRange 160 | 161 | - OLM currently doesn't support cross-catalog upgrades. 162 | - The convention standardizes the catalog repositories to adhere to the naming convention `${OPERATOR_NAME}-registry`. 163 | - Onboarding Boilerplate is a problem for an existing operator that has already been deployed. Once deployed, for an existing operator to upgrade to the new Boilerplate-deployed operator which refers to the new catalog registry with `staging/production` channels, OLM needs to support cross-catalog upgrades. 164 | - Cross catalog upgrades are only possible via [OLM Skiprange](https://v0-18-z.olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/#skiprange). 165 | - The consumer can explicitly enable OLM SkipRange for their operator by specifying `EnableOLMSkipRange="true"` in the repository's `config/config.go`. 166 | - If specified, the `olm.skipRange` annotation will be appended to the CSV during the build process creating an upgrade path for the operator.
167 | -------------------------------------------------------------------------------- /pkg/types/alertmanagerconfig.go: -------------------------------------------------------------------------------- 1 | package alertmanagerconfig 2 | 3 | import ( 4 | "fmt" 5 | 6 | yaml "gopkg.in/yaml.v2" 7 | ) 8 | 9 | // PDRegexLP is the regular expression used in Pager Duty for any Layered Product namespaces. 10 | const PDRegexLP string = "^redhat-.*" 11 | 12 | // PDRegexKube is the regular expression used in Pager Duty for any Kube-system namespaces. 13 | const PDRegexKube string = "^kube-.*" 14 | 15 | // PDRegexOS is the regular expression used in Pager Duty for any managed OpenShift namespaces. 16 | // It is not used, unless one of the '*-namespaces' configMaps is improperly formatted or does not exist. 17 | const PDRegexOS string = "^openshift-.*" 18 | 19 | // The following types are taken from the upstream Alertmanager types, and modified 20 | // to allow printing of Secrets so that we can generate valid configs from them. 21 | // The Alertmanager types are not supported as external libraries, and therefore need 22 | // to be recreated for this operator. 
23 | // Discussion, for reference, is in this PR: https://github.com/prometheus/alertmanager/pull/1804 24 | 25 | type Config struct { 26 | Global *GlobalConfig `yaml:"global,omitempty" json:"global,omitempty"` 27 | Route *Route `yaml:"route,omitempty" json:"route,omitempty"` 28 | Receivers []*Receiver `yaml:"receivers,omitempty" json:"receivers,omitempty"` 29 | Templates []string `yaml:"templates" json:"templates"` 30 | InhibitRules []*InhibitRule `yaml:"inhibit_rules,omitempty" json:"inhibit_rules,omitempty"` 31 | } 32 | 33 | type InhibitRule struct { 34 | TargetMatch map[string]string `yaml:"target_match,omitempty" json:"target_match,omitempty"` 35 | TargetMatchRE map[string]string `yaml:"target_match_re,omitempty" json:"target_match_re,omitempty"` 36 | SourceMatch map[string]string `yaml:"source_match,omitempty" json:"source_match,omitempty"` 37 | SourceMatchRE map[string]string `yaml:"source_match_re,omitempty" json:"source_match_re,omitempty"` 38 | Equal []string `yaml:"equal,omitempty" json:"equal,omitempty"` 39 | } 40 | 41 | func (c Config) String() string { 42 | b, err := yaml.Marshal(c) 43 | if err != nil { 44 | return fmt.Sprintf("", err) 45 | } 46 | return string(b) 47 | } 48 | 49 | // UnmarshalYAML implements the yaml.Unmarshaler interface for Config. 50 | func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { 51 | // We want to set c to the defaults and then overwrite it with the input. 52 | // To make unmarshal fill the plain data struct rather than calling UnmarshalYAML 53 | // again, we have to hide it using a type indirection. 
54 | type plain Config 55 | if err := unmarshal((*plain)(c)); err != nil { 56 | return err 57 | } 58 | 59 | if c.Global == nil { 60 | c.Global = &GlobalConfig{} 61 | } 62 | 63 | names := map[string]struct{}{} 64 | 65 | for _, rcv := range c.Receivers { 66 | if _, ok := names[rcv.Name]; ok { 67 | return fmt.Errorf("notification config name %q is not unique", rcv.Name) 68 | } 69 | for _, pdc := range rcv.PagerdutyConfigs { 70 | if pdc.URL == "" { 71 | if c.Global.PagerdutyURL == "" { 72 | // Set Global default for Pager Duty URL 73 | c.Global.PagerdutyURL = "https://events.pagerduty.com/v2/enqueue" 74 | } 75 | } 76 | } 77 | names[rcv.Name] = struct{}{} 78 | } 79 | return nil 80 | } 81 | 82 | // NotifierConfig contains base options common across all notifier configurations. 83 | type NotifierConfig struct { 84 | VSendResolved bool `yaml:"send_resolved" json:"send_resolved"` 85 | } 86 | 87 | // GlobalConfig defines configuration parameters that are valid globally 88 | // unless overwritten. 89 | type GlobalConfig struct { 90 | // ResolveTimeout is the time after which an alert is declared resolved 91 | // if it has not been updated. 92 | ResolveTimeout string `yaml:"resolve_timeout" json:"resolve_timeout"` 93 | 94 | PagerdutyURL string `yaml:"pagerduty_url,omitempty" json:"pagerduty_url,omitempty"` 95 | } 96 | 97 | // A Route is a node that contains definitions of how to handle alerts. 
type Route struct {
	// Receiver names the Receiver that alerts matching this route are sent to.
	Receiver string `yaml:"receiver,omitempty" json:"receiver,omitempty"`

	// GroupByStr is rendered as `group_by`: label names used to group alerts.
	GroupByStr []string `yaml:"group_by,omitempty" json:"group_by,omitempty"`

	// Match selects alerts by exact label values; MatchRE by regular expressions.
	Match   map[string]string `yaml:"match,omitempty" json:"match,omitempty"`
	MatchRE map[string]string `yaml:"match_re,omitempty" json:"match_re,omitempty"`
	// Continue, when true, lets an alert that matched this route keep
	// being evaluated against subsequent sibling routes.
	Continue bool `yaml:"continue,omitempty" json:"continue,omitempty"`
	// Routes are child routing nodes.
	Routes []*Route `yaml:"routes,omitempty" json:"routes,omitempty"`

	// Timing fields are Alertmanager duration strings (e.g. "30s", "5m").
	GroupWait      string `yaml:"group_wait,omitempty" json:"group_wait,omitempty"`
	GroupInterval  string `yaml:"group_interval,omitempty" json:"group_interval,omitempty"`
	RepeatInterval string `yaml:"repeat_interval,omitempty" json:"repeat_interval,omitempty"`
}

// HttpConfig carries HTTP client settings used when delivering notifications.
type HttpConfig struct {
	ProxyURL  string    `yaml:"proxy_url,omitempty" json:"proxy_url,omitempty"`
	TLSConfig TLSConfig `yaml:"tls_config,omitempty" json:"tls_config,omitempty"`
}

// TLSConfig holds TLS material and verification settings for notifier HTTP clients.
type TLSConfig struct {
	CAFile             string `yaml:"ca_file,omitempty" json:"ca_file,omitempty"`
	KeyFile            string `yaml:"key_file,omitempty" json:"key_file,omitempty"`
	ServerName         string `yaml:"server_name,omitempty" json:"server_name,omitempty"`
	InsecureSkipVerify bool   `yaml:"insecure_skip_verify,omitempty" json:"insecure_skip_verify,omitempty"`
}

// Receiver is a named destination holding zero or more notifier configurations.
type Receiver struct {
	// A unique identifier for this receiver.
	Name string `yaml:"name" json:"name"`

	PagerdutyConfigs []*PagerdutyConfig `yaml:"pagerduty_configs,omitempty" json:"pagerduty_configs,omitempty"`
	WebhookConfigs   []*WebhookConfig   `yaml:"webhook_configs,omitempty" json:"webhook_configs,omitempty"`
}

// WebhookConfig configures notifications via a generic webhook.
type WebhookConfig struct {
	NotifierConfig `yaml:",inline" json:",inline"`

	// URL to send POST request to.
	URL string `yaml:"url" json:"url"`

	HttpConfig HttpConfig `yaml:"http_config,omitempty" json:"http_config,omitempty"`
}

// PagerdutyConfig defines the integration point between AlertManager and PagerDuty
// https://prometheus.io/docs/alerting/latest/configuration/#pagerduty_config
type PagerdutyConfig struct {
	NotifierConfig `yaml:",inline" json:",inline"`

	RoutingKey  string            `yaml:"routing_key,omitempty" json:"routing_key,omitempty"`
	URL         string            `yaml:"url,omitempty" json:"url,omitempty"`
	Client      string            `yaml:"client,omitempty" json:"client,omitempty"`
	ClientURL   string            `yaml:"client_url,omitempty" json:"client_url,omitempty"`
	Description string            `yaml:"description,omitempty" json:"description,omitempty"`
	Details     map[string]string `yaml:"details,omitempty" json:"details,omitempty"`
	Severity    string            `yaml:"severity,omitempty" json:"severity,omitempty"`
	Class       string            `yaml:"class,omitempty" json:"class,omitempty"`
	Component   string            `yaml:"component,omitempty" json:"component,omitempty"`
	Group       string            `yaml:"group,omitempty" json:"group,omitempty"`
	HttpConfig  HttpConfig        `yaml:"http_config,omitempty" json:"http_config,omitempty"`
}

// NamespaceConfig is the top-level shape of the '*-namespaces' configMap payloads.
// NOTE(review): the capitalized yaml keys ("Resources") are intentional —
// they match the configMap format; do not lowercase them.
type NamespaceConfig struct {
	Resources NamespaceList `yaml:"Resources,omitempty" json:"Resources,omitempty"`
}

// NamespaceList groups the namespaces plus optional management-cluster extras.
type NamespaceList struct {
	Namespaces        []Namespace              `yaml:"Namespace,omitempty" json:"Namespace,omitempty"`
	ManagementCluster *ManagementClusterConfig `yaml:"ManagementCluster,omitempty" json:"ManagementCluster,omitempty"`
}

// ManagementClusterConfig holds configuration specific to HyperShift management clusters
type ManagementClusterConfig struct {
	AdditionalNamespaces []Namespace `yaml:"AdditionalNamespaces,omitempty" json:"AdditionalNamespaces,omitempty"`
}

// Namespace is a single namespace entry.
type Namespace struct {
	Name string
`yaml:"name,omitempty" json:"name,omitempty"` 177 | } 178 | -------------------------------------------------------------------------------- /pkg/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | // Copyright 2019 RedHat 2 | // 3 | // Licensed under the Apache License, Version 2.0 (the "License"); 4 | // you may not use this file except in compliance with the License. 5 | // You may obtain a copy of the License at 6 | // 7 | // http://www.apache.org/licenses/LICENSE-2.0 8 | // 9 | // Unless required by applicable law or agreed to in writing, software 10 | // distributed under the License is distributed on an "AS IS" BASIS, 11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | // See the License for the specific language governing permissions and 13 | // limitations under the License. 14 | 15 | package metrics 16 | 17 | import ( 18 | "net/http" 19 | 20 | "github.com/openshift/configure-alertmanager-operator/config" 21 | alertmanager "github.com/openshift/configure-alertmanager-operator/pkg/types" 22 | "github.com/prometheus/client_golang/prometheus" 23 | "github.com/prometheus/client_golang/prometheus/promhttp" 24 | corev1 "k8s.io/api/core/v1" 25 | ) 26 | 27 | const ( 28 | // MetricsEndpoint is the port to export metrics on 29 | MetricsEndpoint = ":8080" 30 | ) 31 | 32 | var ( 33 | metricGASecretExists = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 34 | Name: "ga_secret_exists", 35 | Help: "GoAlert secret exists", 36 | }, []string{"name"}) 37 | metricPDSecretExists = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 38 | Name: "pd_secret_exists", 39 | Help: "Pager Duty secret exists", 40 | }, []string{"name"}) 41 | metricDMSSecretExists = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 42 | Name: "dms_secret_exists", 43 | Help: "Dead Man's Snitch secret exists", 44 | }, []string{"name"}) 45 | metricAMSecretExists = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 46 | Name: 
"am_secret_exists", 47 | Help: "AlertManager Config secret exists", 48 | }, []string{"name"}) 49 | metricAMSecretContainsGA = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 50 | Name: "am_secret_contains_ga", 51 | Help: "AlertManager Config contains configuration for GoAlert", 52 | }, []string{"name"}) 53 | metricAMSecretContainsPD = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 54 | Name: "am_secret_contains_pd", 55 | Help: "AlertManager Config contains configuration for Pager Duty", 56 | }, []string{"name"}) 57 | metricAMSecretContainsDMS = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 58 | Name: "am_secret_contains_dms", 59 | Help: "AlertManager Config contains configuration for Dead Man's Snitch", 60 | }, []string{"name"}) 61 | metricManNSConfigMapExists = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 62 | Name: "managed_namespaces_configmap_exists", 63 | Help: "managed-namespaces configMap exists", 64 | }, []string{"name"}) 65 | metricOcpNSConfigMapExists = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 66 | Name: "ocp_namespaces_configmap_exists", 67 | Help: "ocp-namespaces configMap exists", 68 | }, []string{"name"}) 69 | 70 | metricsList = []prometheus.Collector{ 71 | metricGASecretExists, 72 | metricPDSecretExists, 73 | metricDMSSecretExists, 74 | metricAMSecretExists, 75 | metricAMSecretContainsGA, 76 | metricAMSecretContainsPD, 77 | metricAMSecretContainsDMS, 78 | metricManNSConfigMapExists, 79 | metricOcpNSConfigMapExists, 80 | } 81 | ) 82 | 83 | // StartMetrics register metrics and exposes them 84 | func StartMetrics() error { 85 | 86 | // Register metrics and start serving them on /metrics endpoint 87 | if err := RegisterMetrics(); err != nil { 88 | return err 89 | } 90 | http.Handle("/metrics", promhttp.Handler()) 91 | // TODO: Check errors from ListenAndServe() 92 | //#nosec G114 -- We don't need timeouts for an internal metrics endpoint 93 | go func() { _ = http.ListenAndServe(MetricsEndpoint, nil) }() 94 | return nil 95 | } 96 | 97 | // RegisterMetrics 
for the operator
// by registering every collector in metricsList with the default Prometheus
// registry. It stops and returns at the first registration error.
func RegisterMetrics() error {
	for _, metric := range metricsList {
		if err := prometheus.Register(metric); err != nil {
			return err
		}
	}
	return nil
}

// boolToGauge converts a boolean into the 0/1 float convention used for
// Prometheus gauge values.
func boolToGauge(b bool) float64 {
	if b {
		return 1
	}
	return 0
}

// UpdateSecretsMetrics updates all metrics related to the existence and contents of Secrets
// used by configure-alertmanager-operator.
// `list` is the set of Secrets in the operator's namespace; `amconfig` is the
// parsed AlertManager configuration (may be nil, in which case the
// "contains" metrics all report 0).
func UpdateSecretsMetrics(list *corev1.SecretList, amconfig *alertmanager.Config) {

	// Default to false.
	gaSecretExists := false
	pdSecretExists := false
	dmsSecretExists := false
	amSecretExists := false
	amSecretContainsGA := false
	amSecretContainsPD := false
	amSecretContainsDMS := false

	// Flip the existence flags for each well-known secret found in the SecretList.
	for _, secret := range list.Items {
		switch secret.Name {
		case "goalert-secret":
			gaSecretExists = true
		case "pd-secret":
			pdSecretExists = true
		case "dms-secret":
			dmsSecretExists = true
		case "alertmanager-main":
			amSecretExists = true
		}
	}

	// Check for the presence of GoAlert, PD and DMS configs inside the AlertManager
	// config. Guard against a nil amconfig so a caller that found the
	// alertmanager-main secret but has no parsed config cannot cause a panic.
	// A "contains" flag is only raised if the corresponding secret also exists,
	// matching the original per-secret checks.
	if amSecretExists && amconfig != nil {
		for _, receiver := range amconfig.Receivers {
			switch receiver.Name {
			case "goalert":
				amSecretContainsGA = gaSecretExists
			case "pagerduty":
				amSecretContainsPD = pdSecretExists
			case "watchdog":
				amSecretContainsDMS = dmsSecretExists
			}
		}
	}

	// Only set metrics once per run.
	labels := prometheus.Labels{"name": config.OperatorName}
	metricGASecretExists.With(labels).Set(boolToGauge(gaSecretExists))
	metricPDSecretExists.With(labels).Set(boolToGauge(pdSecretExists))
	metricDMSSecretExists.With(labels).Set(boolToGauge(dmsSecretExists))
	metricAMSecretExists.With(labels).Set(boolToGauge(amSecretExists))
	metricAMSecretContainsGA.With(labels).Set(boolToGauge(amSecretContainsGA))
	metricAMSecretContainsPD.With(labels).Set(boolToGauge(amSecretContainsPD))
	metricAMSecretContainsDMS.With(labels).Set(boolToGauge(amSecretContainsDMS))
}

// UpdateConfigMapMetrics updates all metrics related to the existence and contents of ConfigMaps
// used by configure-alertmanager-operator.
func UpdateConfigMapMetrics(list *corev1.ConfigMapList) {

	// Default to false.
	manNsConfigMapExists := false
	ocpNsConfigMapExists := false

	// Update the metric if the configmap is found in the ConfigMapList.
	for _, configMap := range list.Items {
		switch configMap.Name {
		case "managed-namespaces":
			manNsConfigMapExists = true
		case "ocp-namespaces":
			ocpNsConfigMapExists = true
		}
	}

	// Only set metrics once per run.
	if manNsConfigMapExists {
		metricManNSConfigMapExists.With(prometheus.Labels{"name": config.OperatorName}).Set(float64(1))
	} else {
		metricManNSConfigMapExists.With(prometheus.Labels{"name": config.OperatorName}).Set(float64(0))
	}
	if ocpNsConfigMapExists {
		metricOcpNSConfigMapExists.With(prometheus.Labels{"name": config.OperatorName}).Set(float64(1))
	} else {
		metricOcpNSConfigMapExists.With(prometheus.Labels{"name": config.OperatorName}).Set(float64(0))
	}
}
--------------------------------------------------------------------------------
/pkg/readiness/cluster_ready.go:
--------------------------------------------------------------------------------
// Package readiness decides whether the cluster is "ready" enough for the
// operator to configure alerting, based on the osd-cluster-ready Job and the
// cluster's age as reported by Prometheus.
package readiness

//go:generate mockgen -destination zz_generated_mocks.go -package readiness -source=cluster_ready.go

import (
	"context"
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"net/url"
	"os"
	"strconv"
	"time"

	"github.com/openshift/configure-alertmanager-operator/config"
	"github.com/prometheus/client_golang/api"
	promv1 "github.com/prometheus/client_golang/api/prometheus/v1"
	"github.com/prometheus/common/model"
	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// log is the package-level structured logger for readiness checks.
var log = logf.Log.WithName("readiness")

// Impl is a concrete
instance of the readiness engine.
type Impl struct {
	// Client is a controller-runtime client capable of querying k8s.
	Client client.Client
	// result is what the calling Reconcile should return if it is otherwise successful.
	result reconcile.Result
	// ready indicates whether the cluster is considered ready. Once this is true,
	// IsReady() short-circuits and returns the cached positive result.
	ready bool
	// clusterCreationTime caches the birth time of the cluster so we only have to
	// query prometheus once.
	clusterCreationTime time.Time
	// promAPI is a handle to the prometheus API client
	promAPI promv1.API
}

// Interface is the interface for the readiness engine.
type Interface interface {
	IsReady() (bool, error)
	Result() reconcile.Result
	setClusterCreationTime() error
	clusterTooOld(int) bool
	setPromAPI() error
}

// Compile-time assertion that Impl satisfies Interface.
var _ Interface = &Impl{}

const (
	// Maximum cluster age, in minutes, after which we'll assume we don't need to run health checks.
	maxClusterAgeKey = "MAX_CLUSTER_AGE_MINUTES"
	// By default, ignore clusters older than two hours
	maxClusterAgeDefault = 2 * 60

	// jobName is the Job whose completion signals cluster readiness.
	jobName = "osd-cluster-ready"
)

// IsReady deals with the osd-cluster-ready Job.
// Sets:
// - impl.Ready:
//     true if:
//     - a previous check has already succeeded (a cluster can't become un-ready once it's ready);
//     - an osd-cluster-ready Job has completed; or
//     - the cluster is older than maxClusterAgeMinutes
//     false otherwise.
// - impl.Result: If the caller's reconcile is otherwise successful, it
//   should return the given Result.
// - impl.clusterCreationTime: If it is necessary to check the age of the cluster, this is set so
//   we only have to query prometheus once.
func (impl *Impl) IsReady() (bool, error) {
	if impl.ready {
		log.Info("DEBUG: Using cached positive cluster readiness.")
		return impl.ready, nil
	}

	// Default Result
	impl.result = reconcile.Result{}

	// Readiness job part 1: Grab it, and short out if it has finished (success or failure).
	job := &batchv1.Job{}
	found := true
	if err := impl.Client.Get(context.TODO(), types.NamespacedName{Namespace: config.OperatorNamespace, Name: jobName}, job); err != nil {
		if !errors.IsNotFound(err) {
			// If we couldn't query k8s, it is fatal for this iteration of the reconcile
			return false, fmt.Errorf("failed to retrieve %s Job: %w", jobName, err)
		}
		found = false
	}
	// If the job completed, we're done, and we don't need to bother with the age check.
	// NOTE(review): a freshly created Job can briefly report Active == 0 with
	// Succeeded == 0, which this branch would log as "failed" and declare
	// ready — presumably acceptable here, but worth confirming.
	if found && job.Status.Active == 0 {
		var msg string
		if job.Status.Succeeded > 0 {
			msg = fmt.Sprintf("INFO: Found a succeeded %s Job.", jobName)
		} else {
			msg = fmt.Sprintf("INFO: Found failed %s Job. Declaring ready.", jobName)
		}
		log.Info(msg)
		impl.ready = true
		return impl.ready, nil
	}

	// Cluster age: short out if the cluster is older than the configured value
	if err := impl.setClusterCreationTime(); err != nil {
		log.Error(err, "Failed to determine cluster creation time")
		// If we failed to query prometheus, the cluster isn't ready.
		// We want the main Reconcile loop to proceed, so don't return an error; but
		// we want to requeue rapidly so we can keep checking for cluster birth.
		impl.result = reconcile.Result{Requeue: true, RequeueAfter: time.Second}
		return false, nil
	}
	maxClusterAge, err := getEnvInt(maxClusterAgeKey, maxClusterAgeDefault)
	if err != nil {
		// This is likely to result in a hot loop :(
		return false, err
	}
	if impl.clusterTooOld(maxClusterAge) {
		log.Info(fmt.Sprintf("INFO: Cluster is older than %d minutes. Ignoring health check.", maxClusterAge))
		impl.ready = true
		return impl.ready, nil
	}

	// Readiness job part 2: Either the job is still running or doesn't yet exist. We do
	// these after the age check because they declare "not ready" and requeue, neither of
	// which we want to do if the cluster is too old.

	if found {
		// The Job is still running (we checked Active == 0 above).
		// Requeue with a short delay. We don't want to thrash, but we want to poll the
		// Job fairly frequently so we configure alerts promptly once it finishes.
		delay := 10 * time.Second
		log.Info(fmt.Sprintf("INFO: Found an Active %s Job. Will requeue after %v.", jobName, delay))
		impl.result = reconcile.Result{Requeue: true, RequeueAfter: delay}
		return false, nil
	}

	// The Job doesn't exist -- requeue with a delay and keep looking for it. The delay
	// here can be longish because the readiness job will take a while to complete once
	// it does start.
	delay := 5 * time.Minute
	log.Info(fmt.Sprintf("INFO: No %s Job found; requeueing after %v to wait for it.", jobName, delay))
	impl.result = reconcile.Result{Requeue: true, RequeueAfter: delay}
	return false, nil
}

// Result returns the reconcile.Result the caller should return from its
// Reconcile if that Reconcile is otherwise successful. It is set by IsReady.
func (impl *Impl) Result() reconcile.Result {
	return impl.result
}

// setPromAPI initializes impl.promAPI with a client for the in-cluster
// Prometheus, authenticating with the pod's service account token.
func (impl *Impl) setPromAPI() error {
	rawToken, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/token")
	if err != nil {
		return fmt.Errorf("couldn't read token file: %w", err)
	}

	client, err := api.NewClient(api.Config{
		Address: "https://prometheus-k8s.openshift-monitoring.svc:9091",
		RoundTripper: &http.Transport{
			// The Proxy hook is (ab)used here to inject the Bearer token into
			// every outgoing request before deferring to the normal
			// environment-based proxy resolution.
			Proxy: func(request *http.Request) (*url.URL, error) {
				request.Header.Add("Authorization", "Bearer "+string(rawToken))
				return http.ProxyFromEnvironment(request)
			},
			DialContext: (&net.Dialer{
				Timeout:   30 * time.Second,
				KeepAlive: 30 * time.Second,
			}).DialContext,
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
				// disable "G402 (CWE-295): TLS InsecureSkipVerify set true."
				// #nosec G402
				InsecureSkipVerify: true,
			},
			TLSHandshakeTimeout: 10 * time.Second,
		},
	})
	if err != nil {
		return fmt.Errorf("couldn't configure prometheus client: %w", err)
	}

	impl.promAPI = promv1.NewAPI(client)
	return nil
}

// setClusterCreationTime populates impl.clusterCreationTime by querying
// Prometheus for the cluster's initial version timestamp. The value is
// cached, so Prometheus is only queried once per Impl.
func (impl *Impl) setClusterCreationTime() error {
	// Is it cached?
	if !impl.clusterCreationTime.IsZero() {
		return nil
	}

	if err := impl.setPromAPI(); err != nil {
		return fmt.Errorf("couldn't get prometheus API: %w", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	when := time.Now()
	// For testing, do something like this, subtracting the number of hours
	// since you disabled CVO:
	// when := time.Now().Add(-32*time.Hour)
	result, warnings, err := impl.promAPI.Query(ctx, "cluster_version{type=\"initial\"}", when)
	if err != nil {
		return fmt.Errorf("error querying Prometheus: %w", err)
	}
	if len(warnings) > 0 {
		log.Info(fmt.Sprintf("Warnings: %v\n", warnings))
	}

	log.Info(fmt.Sprintf("DEBUG: Result of type %s:\n%s\n", result.Type().String(), result.String()))
	// NOTE(review): unchecked type assertion — an instant query normally
	// yields a Vector, but this panics if Prometheus returns another model
	// type; consider a checked assertion.
	resultVec := result.(model.Vector)
	// Take the earliest sample value as the cluster birth time (the metric
	// value is a unix timestamp in seconds).
	earliest := time.Time{}
	for i := 0; i < resultVec.Len(); i++ {
		thisTime := time.Unix(int64(resultVec[i].Value), 0)
		if earliest.IsZero() || thisTime.Before(earliest) {
			earliest = thisTime
		}
	}
	if earliest.IsZero() {
		return fmt.Errorf("failed to determine cluster birth time from prometheus %s result %v", result.Type().String(), result.String())
	}
	impl.clusterCreationTime = earliest
	log.Info(fmt.Sprintf("INFO: Cluster created %v", earliest.UTC()))
	return nil
}

// clusterTooOld reports whether the (cached) cluster creation time is more
// than maxAgeMinutes in the past. setClusterCreationTime must have been
// called first; a zero creation time would always compare as "too old".
func (impl *Impl) clusterTooOld(maxAgeMinutes int) bool {
	maxAge := time.Now().Add(time.Duration(-maxAgeMinutes) * time.Minute)
	return impl.clusterCreationTime.Before(maxAge)
}

// getEnvInt returns the integer value of the environment variable with the specified `key`.
// If the env var is unspecified/empty, the `def` value is returned.
// The error is non-nil if the env var is nonempty but cannot be parsed as an int.
239 | func getEnvInt(key string, def int) (int, error) { 240 | var intVal int 241 | var err error 242 | 243 | strVal := os.Getenv(key) 244 | 245 | if strVal == "" { 246 | // Env var unset; use the default 247 | return def, nil 248 | } 249 | 250 | if intVal, err = strconv.Atoi(strVal); err != nil { 251 | return 0, fmt.Errorf("invalid value for env var: %s=%s (expected int): %v", key, strVal, err) 252 | } 253 | 254 | return intVal, nil 255 | } 256 | --------------------------------------------------------------------------------