├── boilerplate ├── _data │ ├── backing-image-tag │ └── last-boilerplate-commit ├── update.cfg ├── openshift │ └── golang-osd-operator │ │ ├── py-requirements.txt │ │ ├── .ci-operator.yaml │ │ ├── fips.go.tmplt │ │ ├── configure-fips.sh │ │ ├── dependabot.yml │ │ ├── .codecov.yml │ │ ├── rvmo-bundle.sh │ │ ├── migrate_build_pipeline.py │ │ ├── golangci.yml │ │ ├── Dockerfile.olm-registry │ │ ├── validate-yaml.py │ │ ├── csv-generate │ │ ├── common.sh │ │ ├── catalog-build.sh │ │ ├── csv-generate.mk │ │ └── catalog-publish.sh │ │ ├── project.mk │ │ ├── OWNERS_ALIASES │ │ ├── codecov.sh │ │ ├── app-sre-build-deploy.sh │ │ ├── ensure.sh │ │ ├── prow-config │ │ ├── app-sre.md │ │ ├── update │ │ └── README.md ├── _lib │ ├── boilerplate.mk │ ├── subscriber │ ├── subscriber-propose │ ├── subscriber-report │ ├── subscriber-report-onboarding │ ├── subscriber-report-pr │ ├── container-make │ ├── freeze-check │ ├── subscriber-propose-update │ ├── subscriber-report-release │ ├── release.sh │ ├── subscriber.sh │ ├── boilerplate-commit │ └── common.sh ├── generated-includes.mk └── update ├── version └── version.go ├── .tekton ├── OWNERS ├── deadmanssnitch-operator-push.yaml └── deadmanssnitch-operator-pull-request.yaml ├── .ci-operator.yaml ├── deploy ├── service_account.yaml ├── role_binding.yaml ├── operator.yaml ├── role.yaml └── crds │ └── deadmanssnitch.managed.openshift.io_deadmanssnitchintegrations.yaml ├── .github ├── renovate.json └── dependabot.yml ├── OWNERS ├── fips.go ├── config ├── metadata │ └── additional-labels.txt ├── templates │ └── csv-template.yaml └── config.go ├── api └── v1alpha1 │ ├── zz_generated.openapi.go │ ├── groupversion_info.go │ ├── deadmanssnitchintegration_types.go │ └── zz_generated.deepcopy.go ├── .codecov.yml ├── test └── deploy │ └── deadmanssnitch.managed.openshift.io_v1alpha1_deadmanssnitchintegration_cr.yaml ├── Makefile ├── pkg ├── utils │ ├── secrets.go │ └── utils.go ├── localmetrics │ ├── localmetrics_test.go │ └── localmetrics.go └── dmsclient │ ├── mock │ └── mock_dmsclient.go │ └── dmsclient.go ├── .gitattributes ├── .gitignore ├── docs └── development.md ├── OWNERS_ALIASES ├── hack └── olm-registry │ ├── olm-artifacts-template.fedramp.yaml │ └── olm-artifacts-template.yaml ├── go.mod ├── main.go ├── controllers └── deadmanssnitchintegration │ ├── event_handlers_test.go │ └── event_handlers.go └── README.md /boilerplate/_data/backing-image-tag: -------------------------------------------------------------------------------- 1 | image-v8.2.0 2 | -------------------------------------------------------------------------------- /boilerplate/update.cfg: -------------------------------------------------------------------------------- 1 | openshift/golang-osd-operator 2 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/py-requirements.txt: -------------------------------------------------------------------------------- 1 | pyyaml>=5.3.1 2 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var ( 4 | Version = "0.0.1" 5 | ) 6 | -------------------------------------------------------------------------------- /.tekton/OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - srep-infra-cicd 3 | approvers: 4 | - srep-infra-cicd 5 | 
-------------------------------------------------------------------------------- /boilerplate/_data/last-boilerplate-commit: -------------------------------------------------------------------------------- 1 | 6c37c92165ab2c46308a1c6a5b11d3cffd8a373d 2 | -------------------------------------------------------------------------------- /.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | name: boilerplate 3 | namespace: openshift 4 | tag: image-v8.2.0 5 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | name: __NAME__ 3 | namespace: __NAMESPACE__ 4 | tag: __TAG__ 5 | -------------------------------------------------------------------------------- /deploy/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: deadmanssnitch-operator 5 | namespace: deadmanssnitch-operator 6 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>openshift/boilerplate//.github/renovate.json" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /boilerplate/_lib/boilerplate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: boilerplate-commit 2 | boilerplate-commit: 3 | @boilerplate/_lib/boilerplate-commit 4 | 5 | .PHONY: boilerplate-freeze-check 6 | boilerplate-freeze-check: 7 | @boilerplate/_lib/freeze-check 8 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - robotmaxtron 3 | - rafael-azevedo 4 | approvers: 5 | - robotmaxtron 6 | - rafael-azevedo 7 | - srep-functional-team-rocket 8 | - srep-functional-leads 9 | - srep-team-leads 10 | maintainers: 11 | - rafael-azevedo 12 | - robotmaxtron 13 | -------------------------------------------------------------------------------- /fips.go: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 | // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /config/metadata/additional-labels.txt: -------------------------------------------------------------------------------- 1 | LABEL com.redhat.component="openshift-deadmanssnitch-operator" io.k8s.description="..." description="..." distribution-scope="public" name="openshift/deadmanssnitch-operator" url="https://github.com/openshift/deadmanssnitch-operator" vendor="Red Hat, Inc." 
release="v0.0.0" version="v0.0.0" 2 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | [propose]='Propose pull/merge requests for subscribers' 9 | [report]='Print information about subscribers' 10 | ) 11 | 12 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 13 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/fips.go.tmplt: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 | // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /deploy/role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: deadmanssnitch-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: deadmanssnitch-operator 8 | # Replace this with the namespace the operator is deployed in. 9 | namespace: deadmanssnitch-operator 10 | roleRef: 11 | kind: ClusterRole 12 | name: deadmanssnitch-operator 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-propose: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | # TODO: 9 | # [bootstrap]='Bootstrap a new subscriber' 10 | # [prow-config]='Propose standardized prow configuration to openshift/release' 11 | [update]='Update an already-onboarded subscriber' 12 | ) 13 | 14 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 15 | -------------------------------------------------------------------------------- /boilerplate/generated-includes.mk: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | # This file automatically includes any *.mk files in your subscribed 3 | # conventions. Please ensure your base Makefile includes only this file. 4 | include boilerplate/_lib/boilerplate.mk 5 | include boilerplate/openshift/golang-osd-operator/csv-generate/csv-generate.mk 6 | include boilerplate/openshift/golang-osd-operator/project.mk 7 | include boilerplate/openshift/golang-osd-operator/standard.mk 8 | -------------------------------------------------------------------------------- /api/v1alpha1/zz_generated.openapi.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | // +build !ignore_autogenerated 3 | 4 | // Code generated by openapi-gen. DO NOT EDIT. 5 | 6 | // This file was autogenerated by openapi-gen. Do not edit it manually! 
7 | 8 | package v1alpha1 9 | 10 | import ( 11 | common "k8s.io/kube-openapi/pkg/common" 12 | ) 13 | 14 | func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { 15 | return map[string]common.OpenAPIDefinition{} 16 | } 17 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-report: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | [onboarding]='Prints a CSV report of onboarded boilerplate subscribers.' 9 | [pr]='Finds boilerplate-related pull requests for registered subscribers.' 10 | [release]='Checks openshift/release configuration for onboarded subscribers.' 11 | ) 12 | 13 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 14 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/configure-fips.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | REPO_ROOT=$(git rev-parse --show-toplevel) 6 | CONVENTION_DIR="$REPO_ROOT/boilerplate/openshift/golang-osd-operator" 7 | PRE_V1_SDK_MANAGER_DIR="$REPO_ROOT/cmd/manager" 8 | 9 | if [[ -d "$PRE_V1_SDK_MANAGER_DIR" ]] 10 | then 11 | MAIN_DIR=$PRE_V1_SDK_MANAGER_DIR 12 | else 13 | MAIN_DIR=$REPO_ROOT 14 | fi 15 | 16 | echo "Writing fips file at $MAIN_DIR/fips.go" 17 | 18 | cp $CONVENTION_DIR/fips.go.tmplt "$MAIN_DIR/fips.go" 19 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "docker" 4 | directory: "/build" 5 | labels: 6 | - "area/dependency" 7 | - "ok-to-test" 8 | schedule: 9 | interval: "weekly" 10 | ignore: 11 | - dependency-name: "redhat-services-prod/openshift/boilerplate" 12 | # don't upgrade boilerplate via these means 13 | - dependency-name: "openshift4/ose-operator-registry" 14 | # don't upgrade ose-operator-registry via these means 15 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | require_ci_to_pass: no 4 | 5 | coverage: 6 | precision: 2 7 | round: down 8 | range: "20...100" 9 | 10 | status: 11 | project: no 12 | patch: no 13 | changes: no 14 | 15 | parsers: 16 | gcov: 17 | branch_detection: 18 | conditional: yes 19 | loop: yes 20 | method: no 21 | macro: no 22 | 23 | comment: 24 | layout: "reach,diff,flags,tree" 25 | behavior: default 26 | require_changes: no 27 | 28 | ignore: 29 | - "**/mocks" 30 | - "**/zz_generated*.go" 31 | -------------------------------------------------------------------------------- /test/deploy/deadmanssnitch.managed.openshift.io_v1alpha1_deadmanssnitchintegration_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: deadmanssnitch.managed.openshift.io/v1alpha1 2 | kind: DeadmansSnitchIntegration 3 | metadata: 4 | name: test-deadmanssnitchintegration 5 | spec: 6 | dmsAPIKeySecretRef: 7 | name: deadmanssnitch-api-key 8 | namespace: deadmanssnitch-operator 9 | clusterDeploymentSelector: 10 | matchLabels: 11 | api.openshift.com/test: 
"true" 12 | targetSecretRef: 13 | name: dms-secret 14 | namespace: test-monitoring 15 | tags: [test,test2] -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # BEGIN boilerplate-managed 2 | version: 2 3 | updates: 4 | - package-ecosystem: "docker" 5 | directory: "/build" 6 | labels: 7 | - "area/dependency" 8 | - "ok-to-test" 9 | schedule: 10 | interval: "weekly" 11 | ignore: 12 | - dependency-name: "redhat-services-prod/openshift/boilerplate" 13 | # don't upgrade boilerplate via these means 14 | - dependency-name: "openshift4/ose-operator-registry" 15 | # don't upgrade ose-operator-registry via these means 16 | # END boilerplate-managed 17 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/.codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | require_ci_to_pass: no 4 | 5 | coverage: 6 | precision: 2 7 | round: down 8 | range: "20...100" 9 | 10 | status: 11 | project: no 12 | patch: no 13 | changes: no 14 | 15 | parsers: 16 | gcov: 17 | branch_detection: 18 | conditional: yes 19 | loop: yes 20 | method: no 21 | macro: no 22 | 23 | comment: 24 | layout: "reach,diff,flags,tree" 25 | behavior: default 26 | require_changes: no 27 | 28 | ignore: 29 | - "**/mocks" 30 | - "**/zz_generated*.go" 31 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | FIPS_ENABLED=true 2 | 3 | include boilerplate/generated-includes.mk 4 | 5 | .PHONY: boilerplate-update 6 | boilerplate-update: ## Make boilerplate update itself 7 | @boilerplate/update 8 | 9 | .PHONY: run 10 | run: ## Run deadmanssnitch-operator locally 11 | OPERATOR_NAME="deadmanssnitch-operator" go run ./main.go 12 | 13 | .PHONY: help 14 | help: ## Show this help screen. 15 | @echo 'Usage: make ... ' 16 | @echo '' 17 | @echo 'Available targets are:' 18 | @echo '' 19 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | sed 's/##//g' | awk 'BEGIN {FS = ":"}; {printf "\033[36m%-30s\033[0m %s\n", $$2, $$3}' 20 | -------------------------------------------------------------------------------- /pkg/utils/secrets.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/types" 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | ) 11 | 12 | // LoadSecretData loads a given secret key and returns its data as a string. 
13 | func LoadSecretData(c client.Client, secretName, namespace, dataKey string) (string, error) { 14 | s := &corev1.Secret{} 15 | err := c.Get(context.TODO(), types.NamespacedName{Name: secretName, Namespace: namespace}, s) 16 | if err != nil { 17 | return "", err 18 | } 19 | retStr, ok := s.Data[dataKey] 20 | if !ok { 21 | return "", fmt.Errorf("secret %s did not contain key %s", secretName, dataKey) 22 | } 23 | return string(retStr), nil 24 | } 25 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | ### BEGIN BOILERPLATE GENERATED -- DO NOT EDIT ### 2 | ### This block must be the last thing in your ### 3 | ### .gitattributes file; otherwise the 'validate' ### 4 | ### CI check will fail. ### 5 | # Used to ensure nobody mucked with boilerplate files. 6 | boilerplate/_lib/freeze-check linguist-generated=false 7 | # Show the boilerplate commit hash update. It's only one line anyway. 8 | boilerplate/_data/last-boilerplate-commit linguist-generated=false 9 | # Used by freeze-check. Good place for attackers to inject badness. 10 | boilerplate/update linguist-generated=false 11 | # Make sure attackers can't hide changes to this configuration 12 | .gitattributes linguist-generated=false 13 | ### END BOILERPLATE GENERATED ### 14 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/rvmo-bundle.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | REPOSITORY=${REPOSITORY:-"https://github.com/openshift/managed-release-bundle-osd.git"} 6 | CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD|egrep '^main$|^release-[0-9]+\.[0-9]+$'|cat) 7 | RVMO_BRANCH=${CURRENT_BRANCH:-main} 8 | # You can override any branch detection by setting RELEASE_BRANCH 9 | BRANCH=${RELEASE_BRANCH:-$RVMO_BRANCH} 10 | DELETE_TEMP_DIR=${DELETE_TEMP_DIR:-true} 11 | TMPD=$(mktemp -d -t rvmo-bundle.XXXXXX) 12 | [[ "${DELETE_TEMP_DIR}" == "true" ]] && trap 'rm -rf ${TMPD}' EXIT 13 | 14 | cd "${TMPD}" 15 | echo "Cloning RVMO from ${REPOSITORY}:${BRANCH}" 16 | git clone --single-branch -b "${BRANCH}" "${REPOSITORY}" . 
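# (Illustrative invocation, not part of the script; the branch name below is
# hypothetical, any existing "release-X.Y" branch works:
#   RELEASE_BRANCH=release-4.16 DELETE_TEMP_DIR=false ./boilerplate/openshift/golang-osd-operator/rvmo-bundle.sh
# Setting DELETE_TEMP_DIR=false keeps the temporary clone for inspection.)
# Run the release-bundle update from inside the fresh clone: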
17 | bash hack/update-operator-release.sh 18 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/migrate_build_pipeline.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import sys 3 | 4 | file_path = sys.argv[1] 5 | with open(file_path, 'r') as f: 6 | data = yaml.safe_load(f) 7 | 8 | spec = data.get('spec', {}) 9 | 10 | # Remove pipelineSpec and taskRunSpecs 11 | spec.pop('pipelineSpec', None) 12 | spec.pop('taskRunSpecs', None) 13 | 14 | # Add pipelineRef 15 | spec['pipelineRef'] = { 16 | 'resolver': 'git', 17 | 'params': [ 18 | {'name': 'url', 'value': 'https://github.com/openshift/boilerplate'}, 19 | {'name': 'revision', 'value': 'master'}, 20 | {'name': 'pathInRepo', 'value': 'pipelines/docker-build-oci-ta/pipeline.yaml'} 21 | ] 22 | } 23 | 24 | # Write back 25 | with open(file_path, 'w') as f: 26 | yaml.dump(data, f, default_flow_style=False, sort_keys=False) 27 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/golangci.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | run: 3 | concurrency: 10 4 | linters: 5 | default: none 6 | enable: 7 | - errcheck 8 | - gosec 9 | - govet 10 | - ineffassign 11 | - misspell 12 | - staticcheck 13 | - unused 14 | settings: 15 | misspell: 16 | extra-words: 17 | - typo: openshit 18 | correction: OpenShift 19 | exclusions: 20 | generated: lax 21 | presets: 22 | - comments 23 | - common-false-positives 24 | - legacy 25 | - std-error-handling 26 | paths: 27 | - third_party/ 28 | - builtin/ 29 | - examples/ 30 | issues: 31 | max-issues-per-linter: 0 32 | max-same-issues: 0 33 | formatters: 34 | exclusions: 35 | generated: lax 36 | paths: 37 | - third_party/ 38 | - builtin/ 39 | - examples/ 40 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/Dockerfile.olm-registry: -------------------------------------------------------------------------------- 1 | FROM registry.redhat.io/openshift4/ose-operator-registry-rhel9:v4.19 AS builder 2 | ARG SAAS_OPERATOR_DIR 3 | COPY ${SAAS_OPERATOR_DIR} manifests 4 | RUN initializer --permissive 5 | 6 | # ubi-micro does not work for clusters with fips enabled unless we make OpenSSL available 7 | FROM registry.access.redhat.com/ubi9/ubi-minimal:latest 8 | 9 | COPY --from=builder /bin/registry-server /bin/registry-server 10 | COPY --from=builder /bin/grpc_health_probe /bin/grpc_health_probe 11 | COPY --from=builder /bin/initializer /bin/initializer 12 | 13 | WORKDIR /registry 14 | RUN chgrp -R 0 /registry && chmod -R g+rwx /registry 15 | 16 | USER 1001 17 | 18 | COPY --from=builder /registry /registry 19 | 20 | EXPOSE 50051 21 | 22 | CMD ["registry-server", "-t", "/tmp/terminate.log"] 23 | 24 | # Set the DC specific label for the location of the DC database in the image 25 | LABEL operators.operatorframework.io.index.database.v1=/registry/bundles.db 26 | -------------------------------------------------------------------------------- /deploy/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deadmanssnitch-operator 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | name: deadmanssnitch-operator 10 | template: 11 | metadata: 12 | labels: 13 | name: deadmanssnitch-operator 14 | 
    spec:
 15 |       serviceAccountName: deadmanssnitch-operator
 16 |       containers:
 17 |         - name: deadmanssnitch-operator
 18 |           # Replace this with the built image name
 19 |           image: REPLACE_IMAGE
 20 |           command:
 21 |             - deadmanssnitch-operator
 22 |           imagePullPolicy: Always
 23 |           resources:
 24 |             requests:
 25 |               memory: "1G"
 26 |               cpu: "100m"
 27 |             limits:
 28 |               memory: "2G"
 29 |           env:
 30 |             - name: WATCH_NAMESPACE
 31 |               value: ""
 32 |             - name: POD_NAME
 33 |               valueFrom:
 34 |                 fieldRef:
 35 |                   fieldPath: metadata.name
 36 |             - name: OPERATOR_NAME
 37 |               value: "deadmanssnitch-operator"
 38 |             - name: FEDRAMP
 39 |               value: "false"
 40 | 
--------------------------------------------------------------------------------
/boilerplate/openshift/golang-osd-operator/validate-yaml.py:
--------------------------------------------------------------------------------
  1 | # Usage
  2 | # python validate-yaml.py path/to/file/or/dir
  3 | 
  4 | import sys
  5 | import yaml
  6 | from os import listdir
  7 | from os.path import isdir, isfile, join, splitext
  8 | 
  9 | usage = "Usage: {0:s} path/to/file/or/dir...".format(sys.argv[0])
 10 | 
 11 | if len(sys.argv) < 2:
 12 |     print(usage)
 13 |     sys.exit(0)
 14 | 
 15 | input_paths = sys.argv[1:]
 16 | 
 17 | error = False
 18 | 
 19 | for path in input_paths:
 20 |     if isfile(path):
 21 |         files = [path]
 22 |     elif isdir(path):
 23 |         files = [join(path, f) for f in listdir(path) if isfile(join(path, f))]
 24 |     else:
 25 |         print("Path {0:s} does not exist".format(path))
 26 |         error = True
 27 |         continue
 28 | 
 29 |     for file_path in files:
 30 |         _, ext = splitext(file_path)
 31 |         if ext not in [".yml", ".yaml"]:
 32 |             continue
 33 | 
 34 |         print("Validating YAML {}".format(file_path))
 35 |         with open(file_path, "r") as f:
 36 |             data = f.read()
 37 |             try:
 38 |                 for y in yaml.safe_load_all(data):
 39 |                     pass
 40 |             except Exception as e:
 41 |                 print(e)
 42 |                 error = True
 43 | 
 44 | sys.exit(error)
 45 | 
--------------------------------------------------------------------------------
/boilerplate/_lib/subscriber-report-onboarding:
--------------------------------------------------------------------------------
  1 | #!/usr/bin/env bash
  2 | 
  3 | REPO_ROOT=$(git rev-parse --show-toplevel)
  4 | source $REPO_ROOT/boilerplate/_lib/common.sh
  5 | 
  6 | usage() {
  7 | cat <=1.17) with enabled Go modules.
 17 | 
 18 | ```shell
 19 | $ go version
 20 | go version go1.17.11 linux/amd64
 21 | ```
 22 | 
 23 | ### operator-sdk
 24 | 
 25 | The operator is developed using the [Operator SDK](https://github.com/operator-framework/operator-sdk).
 26 | Ensure this is installed and available in your `$PATH`.
 27 | 
 28 | [v1.21.0](https://github.com/operator-framework/operator-sdk/releases/tag/v1.21.0) is being used for `deadmanssnitch-operator` development.
 29 | 
 30 | ```shell
 31 | $ operator-sdk version
 32 | operator-sdk version: "v1.21.0", commit: "89d21a133750aee994476736fa9523656c793588", kubernetes version: "1.23", go version: "go1.17.10", GOOS: "linux", GOARCH: "amd64"
 33 | ```
 34 | 
 35 | ## Makefile
 36 | 
 37 | Some `make` targets can be run from the top-level `Makefile` itself:
 38 | 
 39 | ```shell
 40 | $ make help
 41 | Usage: make <OPTIONS> ... <TARGETS>
 42 | 
 43 | Available targets are:
 44 | 
 45 | go-build             Build binary
 46 | go-check             Golang linting and other static analysis
 47 | boilerplate-update   Make boilerplate update itself
 48 | help                 Show this help screen.
 49 | run                  Run deadmanssnitch-operator locally
 50 | ```
 51 | 
 52 | ---
 53 | 
 54 | ## Build using boilerplate container
 55 | 
 56 | To run lint, test and build in the `app-sre/boilerplate` container, call `boilerplate/_lib/container-make`. 
This will call `make` inside the `app-sre/boilerplate` container. 57 | 58 | ```shell 59 | boilerplate/_lib/container-make TARGET 60 | ``` 61 | 62 | Example: 63 | 64 | ```shell 65 | # To run unit tests 66 | boilerplate/_lib/container-make test 67 | 68 | # To run lint tests 69 | boilerplate/_lib/container-make lint 70 | 71 | # To run coverage 72 | boilerplate/_lib/container-make coverage 73 | ``` 74 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | OPERATOR_NAMESPACE?=$(shell sed -n 's/.*OperatorNamespace .*"\([^"]*\)".*/\1/p' config/config.go) 4 | 5 | IMAGE_REGISTRY?=quay.io 6 | IMAGE_REPOSITORY?=app-sre 7 | IMAGE_NAME?=$(OPERATOR_NAME) 8 | 9 | # Optional additional deployment image 10 | SUPPLEMENTARY_IMAGE_NAME?=$(shell sed -n 's/.*SupplementaryImage .*"\([^"]*\)".*/\1/p' config/config.go) 11 | 12 | # Optional: Enable OLM skip-range 13 | # https://v0-18-z.olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/#skiprange 14 | EnableOLMSkipRange?=$(shell sed -n 's/.*EnableOLMSkipRange .*"\([^"]*\)".*/\1/p' config/config.go) 15 | 16 | VERSION_MAJOR?=0 17 | VERSION_MINOR?=1 18 | 19 | ifdef RELEASE_BRANCHED_BUILDS 20 | # Make sure all called shell scripts know what's up 21 | export RELEASE_BRANCHED_BUILDS 22 | 23 | # RELEASE_BRANCH from env vars takes precedence; if not set, try to figure it out 24 | RELEASE_BRANCH:=${RELEASE_BRANCH} 25 | ifneq ($(RELEASE_BRANCH),) 26 | # Sanity check, just to be nice 27 | RELEASE_BRANCH_TEST := $(shell echo ${RELEASE_BRANCH} | grep -E '^release-[0-9]+\.[0-9]+$$') 28 | ifeq ($(RELEASE_BRANCH_TEST),) 29 | $(warning Provided RELEASE_BRANCH doesn't conform to "release-X.Y" pattern; you sure you didn't make a mistake?) 
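# (For reference: conforming values look like "release-0.1" or "release-4.16".
# The warning above is advisory only; a malformed RELEASE_BRANCH still feeds
# the VERSION_MAJOR/VERSION_MINOR extraction further down.)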
30 | endif 31 | endif 32 | 33 | ifeq ($(RELEASE_BRANCH),) 34 | # Check git repo's branch first 35 | RELEASE_BRANCH := $(shell git rev-parse --abbrev-ref HEAD | grep -E '^release-[0-9]+\.[0-9]+$$') 36 | endif 37 | 38 | ifeq ($(RELEASE_BRANCH),) 39 | # Try to parse it out of Jenkins' JOB_NAME 40 | RELEASE_BRANCH := $(shell echo ${JOB_NAME} | grep -E --only-matching 'release-[0-9]+\.[0-9]+') 41 | endif 42 | 43 | ifeq ($(RELEASE_BRANCH),) 44 | $(error RELEASE_BRANCHED_BUILDS is set, but couldn't detect a release branch and RELEASE_BRANCH is not set; giving up) 45 | else 46 | SEMVER := $(subst release-,,$(subst ., ,$(RELEASE_BRANCH))) 47 | VERSION_MAJOR := $(firstword $(SEMVER)) 48 | VERSION_MINOR := $(lastword $(SEMVER)) 49 | endif 50 | endif 51 | 52 | REGISTRY_USER?=$(QUAY_USER) 53 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 54 | -------------------------------------------------------------------------------- /OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | # ================================ DO NOT EDIT ================================ 2 | # This file is managed in https://github.com/openshift/boilerplate 3 | # See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 4 | # ============================================================================= 5 | aliases: 6 | srep-functional-team-aurora: 7 | - abyrne55 8 | - AlexSmithGH 9 | - dakotalongRH 10 | - eth1030 11 | - joshbranham 12 | - luis-falcon 13 | - reedcort 14 | srep-functional-team-fedramp: 15 | - theautoroboto 16 | - katherinelc321 17 | - rojasreinold 18 | - fsferraz-rh 19 | - jonahbrawley 20 | - digilink 21 | - annelson-rh 22 | - pheckenlWork 23 | - ironcladlou 24 | - MrSantamaria 25 | - PeterCSRE 26 | - cjnovak98 27 | srep-functional-team-hulk: 28 | - ravitri 29 | - devppratik 30 | - Tafhim 31 | - tkong-redhat 32 | - TheUndeadKing 33 | - vaidehi411 34 | - chamalabey 35 | - charlesgong 36 | - rbhilare 37 | srep-functional-team-orange: 38 | - bergmannf 39 | - Makdaam 40 | - Nikokolas3270 41 | - RaphaelBut 42 | - MateSaary 43 | - rolandmkunkel 44 | - petrkotas 45 | - zmird-r 46 | - hectorakemp 47 | srep-functional-team-rocket: 48 | - aliceh 49 | - anispate 50 | - clcollins 51 | - Mhodesty 52 | - nephomaniac 53 | - tnierman 54 | srep-functional-team-security: 55 | - jaybeeunix 56 | - sam-nguyen7 57 | - wshearn 58 | - dem4gus 59 | - npecka 60 | - pshickeydev 61 | - casey-williams-rh 62 | - boranx 63 | srep-functional-team-thor: 64 | - a7vicky 65 | - diakovnec 66 | - MitaliBhalla 67 | - feichashao 68 | - samanthajayasinghe 69 | - xiaoyu74 70 | - Tessg22 71 | - smarthall 72 | srep-infra-cicd: 73 | - ritmun 74 | - yiqinzhang 75 | - varunraokadaparthi 76 | srep-functional-leads: 77 | - abyrne55 78 | - clcollins 79 | - bergmannf 80 | - theautoroboto 81 | - smarthall 82 | - sam-nguyen7 83 | - ravitri 84 | srep-team-leads: 85 | - rafael-azevedo 86 | - iamkirkbater 87 | - rogbas 88 | - dustman9000 89 | - bng0y 90 | - bmeng 91 | - typeid 92 | sre-group-leads: 93 | - apahim 94 | - maorfr 95 | - rogbas 96 | srep-architects: 97 | - jharrington22 98 | - cblecker 99 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | # ================================ DO NOT EDIT ================================ 2 | # This file is managed in https://github.com/openshift/boilerplate 3 | # See the OWNERS_ALIASES 
docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 4 | # ============================================================================= 5 | aliases: 6 | srep-functional-team-aurora: 7 | - abyrne55 8 | - AlexSmithGH 9 | - dakotalongRH 10 | - eth1030 11 | - joshbranham 12 | - luis-falcon 13 | - reedcort 14 | srep-functional-team-fedramp: 15 | - theautoroboto 16 | - katherinelc321 17 | - rojasreinold 18 | - fsferraz-rh 19 | - jonahbrawley 20 | - digilink 21 | - annelson-rh 22 | - pheckenlWork 23 | - ironcladlou 24 | - MrSantamaria 25 | - PeterCSRE 26 | - cjnovak98 27 | srep-functional-team-hulk: 28 | - ravitri 29 | - devppratik 30 | - Tafhim 31 | - tkong-redhat 32 | - TheUndeadKing 33 | - vaidehi411 34 | - chamalabey 35 | - charlesgong 36 | - rbhilare 37 | srep-functional-team-orange: 38 | - bergmannf 39 | - Makdaam 40 | - Nikokolas3270 41 | - RaphaelBut 42 | - MateSaary 43 | - rolandmkunkel 44 | - petrkotas 45 | - zmird-r 46 | - hectorakemp 47 | srep-functional-team-rocket: 48 | - aliceh 49 | - anispate 50 | - clcollins 51 | - Mhodesty 52 | - nephomaniac 53 | - tnierman 54 | srep-functional-team-security: 55 | - jaybeeunix 56 | - sam-nguyen7 57 | - wshearn 58 | - dem4gus 59 | - npecka 60 | - pshickeydev 61 | - casey-williams-rh 62 | - boranx 63 | srep-functional-team-thor: 64 | - a7vicky 65 | - diakovnec 66 | - MitaliBhalla 67 | - feichashao 68 | - samanthajayasinghe 69 | - xiaoyu74 70 | - Tessg22 71 | - smarthall 72 | srep-infra-cicd: 73 | - ritmun 74 | - yiqinzhang 75 | - varunraokadaparthi 76 | srep-functional-leads: 77 | - abyrne55 78 | - clcollins 79 | - bergmannf 80 | - theautoroboto 81 | - smarthall 82 | - sam-nguyen7 83 | - ravitri 84 | srep-team-leads: 85 | - rafael-azevedo 86 | - iamkirkbater 87 | - rogbas 88 | - dustman9000 89 | - bng0y 90 | - bmeng 91 | - typeid 92 | sre-group-leads: 93 | - apahim 94 | - maorfr 95 | - rogbas 96 | srep-architects: 97 | - jharrington22 98 | - cblecker 99 | -------------------------------------------------------------------------------- /boilerplate/_lib/container-make: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ "$1" == "-h"* ]] || [[ "$1" == "--h"* ]]; then 4 | echo "Usage: $0 {arguments to the real 'make'}" 5 | echo "Runs 'make' in the boilerplate backing container." 6 | echo "If the command fails, starts a shell in the container so you can debug." 7 | echo "Set NONINTERACTIVE=true (or TRUE) to skip the debug shell and exit with the make return code." 8 | exit -1 9 | fi 10 | 11 | source ${0%/*}/common.sh 12 | 13 | CONTAINER_ENGINE="${CONTAINER_ENGINE:-$(command -v podman || command -v docker)}" 14 | [[ -n "$CONTAINER_ENGINE" ]] || err "Couldn't find a container engine. Are you already in a container?" 15 | 16 | # Make sure the mount inside the container is named in such a way that 17 | # - openapi-gen (which relies on GOPATH) produces absolute paths; and 18 | # - other go-ish paths are writeable, e.g. for `go mod download`. 19 | CONTAINER_MOUNT=/go/src/$(repo_import $REPO_ROOT) 20 | 21 | # First set up a detached container with the repo mounted. 
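# (Design note: the container is started detached and `make` is exec'd into
# it, rather than using a one-shot `run`, so that on failure the very same
# container is still alive for the interactive debug shell below.)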
22 | banner "Starting the container" 23 | CE_OPTS="--platform=linux/amd64" 24 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]]; then 25 | CE_OPTS="${CE_OPTS} --userns keep-id" 26 | fi 27 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]] && [[ $OSTYPE == *"linux"* ]]; then 28 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT:Z" 29 | else 30 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT" 31 | fi 32 | container_id=$($CONTAINER_ENGINE run -d ${CE_OPTS} $IMAGE_PULL_PATH sleep infinity) 33 | 34 | if [[ $? -ne 0 ]] || [[ -z "$container_id" ]]; then 35 | err "Couldn't start detached container" 36 | fi 37 | 38 | # Now run our `make` command in it with the right UID and working directory 39 | args="exec -it -u $(id -u):0 -w $CONTAINER_MOUNT $container_id" 40 | banner "Running: make $@" 41 | $CONTAINER_ENGINE $args make "$@" 42 | rc=$? 43 | 44 | # If it failed, check if we should drop into a shell or exit 45 | if [[ $rc -ne 0 ]]; then 46 | # Case-insensitive check for NONINTERACTIVE (true, TRUE, True all work) 47 | if [[ "${NONINTERACTIVE,,}" == "true" ]]; then 48 | banner "The 'make' command failed with exit code $rc. Skipping debug shell (NONINTERACTIVE=${NONINTERACTIVE})." 49 | else 50 | banner "The 'make' command failed! Starting a shell in the container for debugging. Just 'exit' when done." 51 | $CONTAINER_ENGINE $args /bin/bash 52 | fi 53 | fi 54 | 55 | # Finally, remove the container 56 | banner "Cleaning up the container" 57 | $CONTAINER_ENGINE rm -f $container_id >/dev/null 58 | 59 | # Exit with the return code from make 60 | exit $rc 61 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/codecov.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | REPO_ROOT=$(git rev-parse --show-toplevel) 8 | CI_SERVER_URL=https://prow.svc.ci.openshift.org/view/gcs/origin-ci-test 9 | COVER_PROFILE=${COVER_PROFILE:-coverage.out} 10 | JOB_TYPE=${JOB_TYPE:-"local"} 11 | 12 | # Default concurrency to four threads. By default it's the number of procs, 13 | # which seems to be 16 in the CI env. Some consumers' coverage jobs were 14 | # regularly getting OOM-killed; so do this rather than boost the pod resources 15 | # unreasonably. 16 | COV_THREAD_COUNT=${COV_THREAD_COUNT:-4} 17 | make -C "${REPO_ROOT}" go-test TESTOPTS="-coverprofile=${COVER_PROFILE}.tmp -covermode=atomic -coverpkg=./... 
-p ${COV_THREAD_COUNT}" 18 | 19 | # Remove generated files from coverage profile 20 | grep -v "zz_generated" "${COVER_PROFILE}.tmp" > "${COVER_PROFILE}" 21 | rm -f "${COVER_PROFILE}.tmp" 22 | 23 | # Configure the git refs and job link based on how the job was triggered via prow 24 | if [[ "${JOB_TYPE}" == "presubmit" ]]; then 25 | echo "detected PR code coverage job for #${PULL_NUMBER}" 26 | REF_FLAGS="-P ${PULL_NUMBER} -C ${PULL_PULL_SHA}" 27 | JOB_LINK="${CI_SERVER_URL}/pr-logs/pull/${REPO_OWNER}_${REPO_NAME}/${PULL_NUMBER}/${JOB_NAME}/${BUILD_ID}" 28 | elif [[ "${JOB_TYPE}" == "postsubmit" ]]; then 29 | echo "detected branch code coverage job for ${PULL_BASE_REF}" 30 | REF_FLAGS="-B ${PULL_BASE_REF} -C ${PULL_BASE_SHA}" 31 | JOB_LINK="${CI_SERVER_URL}/logs/${JOB_NAME}/${BUILD_ID}" 32 | elif [[ "${JOB_TYPE}" == "local" ]]; then 33 | echo "coverage report available at ${COVER_PROFILE}" 34 | exit 0 35 | else 36 | echo "${JOB_TYPE} jobs not supported" >&2 37 | exit 1 38 | fi 39 | 40 | # Configure certain internal codecov variables with values from prow. 41 | export CI_BUILD_URL="${JOB_LINK}" 42 | export CI_BUILD_ID="${JOB_NAME}" 43 | export CI_JOB_ID="${BUILD_ID}" 44 | 45 | if [[ "${JOB_TYPE}" != "local" ]]; then 46 | if [[ -z "${ARTIFACT_DIR:-}" ]] || [[ ! -d "${ARTIFACT_DIR}" ]] || [[ ! -w "${ARTIFACT_DIR}" ]]; then 47 | echo '${ARTIFACT_DIR} must be set for non-local jobs, and must point to a writable directory' >&2 48 | exit 1 49 | fi 50 | curl -sS https://codecov.io/bash -o "${ARTIFACT_DIR}/codecov.sh" 51 | bash <(cat "${ARTIFACT_DIR}/codecov.sh") -Z -K -f "${COVER_PROFILE}" -r "${REPO_OWNER}/${REPO_NAME}" ${REF_FLAGS} 52 | else 53 | bash <(curl -s https://codecov.io/bash) -Z -K -f "${COVER_PROFILE}" -r "${REPO_OWNER}/${REPO_NAME}" ${REF_FLAGS} 54 | fi 55 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/catalog-build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source `dirname $0`/common.sh 6 | 7 | usage() { echo "Usage: $0 -o operator-name -c saas-repository-channel -r registry-image" 1>&2; exit 1; } 8 | 9 | while getopts "o:c:r:" option; do 10 | case "${option}" in 11 | o) 12 | operator_name=${OPTARG} 13 | ;; 14 | c) 15 | operator_channel=${OPTARG} 16 | ;; 17 | r) 18 | # NOTE: This is the URL without the tag/digest 19 | registry_image=${OPTARG} 20 | ;; 21 | *) 22 | usage 23 | esac 24 | done 25 | 26 | # Detect the container engine to use, allowing override from the env 27 | CONTAINER_ENGINE=${CONTAINER_ENGINE:-$(command -v podman || command -v docker || true)} 28 | if [[ -z "$CONTAINER_ENGINE" ]]; then 29 | echo "WARNING: Couldn't find a container engine! Defaulting to docker." 30 | CONTAINER_ENGINE=docker 31 | fi 32 | 33 | # Checking parameters 34 | check_mandatory_params operator_channel operator_name 35 | 36 | # Parameters for the Dockerfile 37 | SAAS_OPERATOR_DIR="saas-${operator_name}-bundle" 38 | BUNDLE_DIR="${SAAS_OPERATOR_DIR}/${operator_name}" 39 | DOCKERFILE_REGISTRY="build/Dockerfile.olm-registry" 40 | 41 | # Checking SAAS_OPERATOR_DIR exist 42 | if [ ! -d "${SAAS_OPERATOR_DIR}/.git" ] ; then 43 | echo "${SAAS_OPERATOR_DIR} should exist and be a git repository" 44 | exit 1 45 | fi 46 | 47 | # Calculate new operator version from bundles inside the saas directory 48 | OPERATOR_NEW_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . 
-k 3 -g | tail -n 1)
 49 | 
 50 | # Create package yaml
 51 | # This must be included in the registry build.
 52 | # `currentCSV` must reference the latest bundle version included.
 53 | # Any version after `currentCSV` loaded by the initializer
 54 | # will be silently pruned, as it's not reachable.
 55 | PACKAGE_YAML_PATH="${BUNDLE_DIR}/${operator_name}.package.yaml"
 56 | 
 57 | cat <<EOF > "${PACKAGE_YAML_PATH}"
 58 | packageName: ${operator_name}
 59 | channels:
 60 | - name: ${operator_channel}
 61 |   currentCSV: ${operator_name}.v${OPERATOR_NEW_VERSION}
 62 | EOF
 63 | 
 64 | TAG="${operator_channel}-latest"
 65 | if [[ "${RELEASE_BRANCHED_BUILDS}" ]]; then
 66 |     TAG="v${OPERATOR_NEW_VERSION}"
 67 | fi
 68 | 
 69 | ${CONTAINER_ENGINE} build --pull -f "${DOCKERFILE_REGISTRY}" --build-arg "SAAS_OPERATOR_DIR=${SAAS_OPERATOR_DIR}" --tag "${registry_image}:${TAG}" .
 70 | 
 71 | if [ $? -ne 0 ] ; then
 72 |     echo "container build failed, exiting..."
 73 |     exit 1
 74 | fi
 75 | 
 76 | # TODO : Test the image and the version it contains
 77 | 
--------------------------------------------------------------------------------
/pkg/localmetrics/localmetrics_test.go:
--------------------------------------------------------------------------------
  1 | package localmetrics
  2 | 
  3 | import (
  4 | 	neturl "net/url"
  5 | 	"testing"
  6 | 
  7 | 	"github.com/stretchr/testify/assert"
  8 | )
  9 | 
 10 | func TestPathParse(t *testing.T) {
 11 | 	tests := []struct {
 12 | 		name     string
 13 | 		path     string
 14 | 		expected string
 15 | 	}{
 16 | 		{
 17 | 			name:     "core non-namespaced kind",
 18 | 			path:     "/api/v1/pods",
 19 | 			expected: "core/v1/pods",
 20 | 		},
 21 | 		{
 22 | 			name:     "core non-namespaced named resource",
 23 | 			path:     "/api/v1/nodes/nodename",
 24 | 			expected: "core/v1/nodes/{NAME}",
 25 | 		},
 26 | 		{
 27 | 			name:     "core namespaced named resource",
 28 | 			path:     "/api/v1/namespaces/aws-account-operator/configmaps/foo-bar-baz",
 29 | 			expected: "core/v1/namespaces/{NAMESPACE}/configmaps/{NAME}",
 30 | 		},
 31 | 		{
 32 | 			name:     "core namespaced named resource with sub-resource",
 33 | 			path:     "/api/v1/namespaces/aws-account-operator/secret/foo-bar-baz/status",
 34 | 			expected: "core/v1/namespaces/{NAMESPACE}/secret/{NAME}/status",
 35 | 		},
 36 | 		{
 37 | 			name:     "extension non-namespaced kind",
 38 | 			path:     "/apis/batch/v1/jobs",
 39 | 			expected: "batch/v1/jobs",
 40 | 		},
 41 | 		{
 42 | 			name:     "extension namespaced kind",
 43 | 			path:     "/apis/batch/v1/namespaces/aws-account-operator/jobs",
 44 | 			expected: "batch/v1/namespaces/{NAMESPACE}/jobs",
 45 | 		},
 46 | 		{
 47 | 			name:     "extension namespaced named resource",
 48 | 			path:     "/apis/batch/v1/namespaces/aws-account-operator/jobs/foo-bar-baz",
 49 | 			expected: "batch/v1/namespaces/{NAMESPACE}/jobs/{NAME}",
 50 | 		},
 51 | 		{
 52 | 			name:     "extension namespaced named resource with sub-resource",
 53 | 			path:     "/apis/aws.managed.openshift.io/v1alpha1/namespaces/aws-account-operator/accountpool/foo-bar-baz/status",
 54 | 			expected: "aws.managed.openshift.io/v1alpha1/namespaces/{NAMESPACE}/accountpool/{NAME}/status",
 55 | 		},
 56 | 		{
 57 | 			name:     "core root (discovery)",
 58 | 			path:     "/api",
 59 | 			expected: "core",
 60 | 		},
 61 | 		{
 62 | 			name:     "core version (discovery)",
 63 | 			path:     "/api/v1",
 64 | 			expected: "core/v1",
 65 | 		},
 66 | 		{
 67 | 			name:     "extension discovery",
 68 | 			path:     "/apis/aws.managed.openshift.io/v1",
 69 | 			expected: "aws.managed.openshift.io/v1",
 70 | 		},
 71 | 		{
 72 | 			name:     "unknown root",
 73 | 			path:     "/weird/path/to/resource",
 74 | 			expected: "{OTHER}",
 75 | 		},
 76 | 		{
 77 | 			name:     "empty to make Split fail",
 78 | 			path:     "",
 79 | 			expected: "{OTHER}",
 80 | 		},
 81 | 	}
 82 | 	for _, test := range tests {
 83 | 
t.Run(test.name, func(t *testing.T) { 84 | result := resourceFrom(&neturl.URL{Path: test.path}) 85 | assert.Equal(t, test.expected, result) 86 | }) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/app-sre-build-deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -ev 4 | 5 | usage() { 6 | cat < "$grpcurl" 70 | chmod +x "$grpcurl" 71 | fi 72 | 73 | ln -fs "$grpcurl" grpcurl 74 | ;; 75 | 76 | venv) 77 | # Set up a python virtual environment 78 | python3 -m venv .venv 79 | # Install required libs, if a requirements file was given 80 | if [[ -n "$2" ]]; then 81 | .venv/bin/python3 -m pip install -r "$2" 82 | fi 83 | ;; 84 | 85 | *) 86 | echo "Unknown dependency: ${DEPENDENCY}" 87 | exit 1 88 | ;; 89 | esac 90 | -------------------------------------------------------------------------------- /hack/olm-registry/olm-artifacts-template.fedramp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Template 3 | metadata: 4 | name: olm-artifacts-template 5 | 6 | parameters: 7 | - name: REGISTRY_IMG 8 | required: true 9 | - name: CHANNEL 10 | value: staging 11 | - name: IMAGE_TAG 12 | value: latest 13 | - name: REPO_DIGEST 14 | value: latest 15 | - name: SILENT_ALERT_LEGALENTITY_IDS 16 | value: '["None"]' 17 | - name: DEADMANSSNITCH_OSD_TAGS 18 | required: true 19 | - name: FEDRAMP 20 | value: "false" 21 | 22 | objects: 23 | - apiVersion: operators.coreos.com/v1alpha1 24 | kind: CatalogSource 25 | metadata: 26 | name: deadmanssnitch-operator-catalog 27 | spec: 28 | sourceType: grpc 29 | grpcPodConfig: 30 | securityContextConfig: restricted 31 | image: ${REPO_DIGEST} 32 | displayName: deadmanssnitch-operator Registry 33 | publisher: SRE 34 | 35 | - apiVersion: operators.coreos.com/v1alpha2 36 | kind: OperatorGroup 37 | metadata: 38 | name: deadmanssnitch-operator-og 39 | spec: 40 | targetNamespaces: 41 | - deadmanssnitch-operator 42 | 43 | - apiVersion: operators.coreos.com/v1alpha1 44 | kind: Subscription 45 | metadata: 46 | name: deadmanssnitch-operator 47 | spec: 48 | channel: ${CHANNEL} 49 | name: deadmanssnitch-operator 50 | source: deadmanssnitch-operator-catalog 51 | sourceNamespace: deadmanssnitch-operator 52 | config: 53 | env: 54 | - name: FEDRAMP 55 | value: "${FEDRAMP}" 56 | 57 | - apiVersion: deadmanssnitch.managed.openshift.io/v1alpha1 58 | kind: DeadmansSnitchIntegration 59 | metadata: 60 | name: osd 61 | spec: 62 | snitchNamePostFix: "" 63 | dmsAPIKeySecretRef: 64 | name: deadmanssnitch-api-key 65 | namespace: deadmanssnitch-operator 66 | clusterDeploymentSelector: 67 | matchExpressions: 68 | # only create DMS service for managed (OSD) clusters 69 | - key: api.openshift.com/managed 70 | operator: In 71 | values: ["true"] 72 | # ignore CD w/ "legacy" noalerts label 73 | - key: api.openshift.com/noalerts 74 | operator: NotIn 75 | values: ["true"] 76 | # ignore CD w/ ext noalerts label 77 | - key: ext-managed.openshift.io/noalerts 78 | operator: NotIn 79 | values: ["true"] 80 | # ignore CD for specific organizations 81 | - key: api.openshift.com/legal-entity-id 82 | operator: NotIn 83 | values: ${{SILENT_ALERT_LEGALENTITY_IDS}} 84 | # ignore CD for any "nightly" clusters 85 | - key: api.openshift.com/channel-group 86 | operator: NotIn 87 | values: ["nightly"] 88 | - key: api.openshift.com/fedramp 89 | operator: In 90 | values: ["true"] 91 | - key: 
api.openshift.com/environment 92 | operator: NotIn 93 | values: 94 | - "integration" 95 | - "staging" 96 | - "stage" 97 | targetSecretRef: 98 | name: dms-secret 99 | namespace: openshift-monitoring 100 | tags: ${{DEADMANSSNITCH_OSD_TAGS}} 101 | clusterDeploymentAnnotationsToSkip: 102 | - name: hive.openshift.io/fake-cluster 103 | value: "true" 104 | - name: managed.openshift.com/fake 105 | value: "true" 106 | -------------------------------------------------------------------------------- /boilerplate/_lib/freeze-check: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # NOTE: For security reasons, everything imported or invoked (even 4 | # indirectly) by this script should be audited for vulnerabilities and 5 | # explicitly excluded from `linguist-generated` in the consuming 6 | # repository's .gitattributes. In other words, we want PRs to show 7 | # deltas to this script and all its dependencies by default so that 8 | # attempts to inject or circumvent code are visible. 9 | 10 | set -e 11 | 12 | REPO_ROOT=$(git rev-parse --show-toplevel) 13 | # Hardcoded rather than sourced to reduce attack surface. 14 | BOILERPLATE_GIT_REPO=https://github.com/openshift/boilerplate.git 15 | 16 | # Validate that no subscribed boilerplate artifacts have been changed. 17 | # PR checks may wish to gate on this. 18 | 19 | # This works by grabbing the commit hash of the boilerplate repository 20 | # at which the last update was applied, running the main `update` driver 21 | # against that, and failing if there's a resulting diff. 22 | 23 | # If we can't tell what that commit was, we must assume this is the 24 | # first update, and we'll (noisily) "succeed". 25 | 26 | # Note that this ought to work when you've just committed an update, 27 | # even if you've changed your update.cfg beforehand. We're basically 28 | # making sure you didn't muck with anything after updating. 29 | 30 | # For this to work, you have to be starting from a clean repository 31 | # state (any changes committed). 32 | # TODO(efried): This is not ideal -- it would be nice if I could check 33 | # this before committing my changes -- but how would that work? Diff to 34 | # a file, create a temporary commit, run the rest, remove the commit, 35 | # and reapply the diff? Messy and error-prone -- and I would be 36 | # seriously ticked off if something went wrong and lost my in-flight 37 | # changes. 38 | if ! [ -z "$(git status --porcelain -- ':!build/Dockerfile*')" ]; then 39 | echo "Can't validate boilerplate in a dirty repository. Please commit your changes and try again." >&2 40 | exit 1 41 | fi 42 | 43 | # We glean the last boilerplate commit from the 44 | # last-boilerplate-commit file, which gets laid down by the main 45 | # `update` driver each time it runs. 46 | LBCF=${REPO_ROOT}/boilerplate/_data/last-boilerplate-commit 47 | if ! [[ -f "$LBCF" ]]; then 48 | echo "Couldn't discover last boilerplate commit! Assuming you're bootstrapping." 49 | exit 0 50 | fi 51 | LBC=$(cat $LBCF) 52 | 53 | # Download just that commit 54 | echo "Fetching $LBC from $BOILERPLATE_GIT_REPO" 55 | # boilerplate/update cleans up this temp dir 56 | TMPD=$(mktemp -d) 57 | cd $TMPD 58 | git init 59 | # TODO(efried): DRY this remote. Make it configurable? 
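# Fetching the recorded hash directly, rather than a branch tip, pins the
# comparison to the exact boilerplate level applied at the last update.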
60 | git remote add origin $BOILERPLATE_GIT_REPO 61 | git fetch origin $(cat $LBCF) --tags 62 | git reset --hard FETCH_HEAD 63 | 64 | # Now invoke the update script, overriding the source repository we've 65 | # just downloaded at the appropriate commit. 66 | # We invoke the script explicitly rather than via the make target to 67 | # close a security hole whereby the latter is overridden. 68 | echo "Running update" 69 | cd $REPO_ROOT 70 | BOILERPLATE_GIT_REPO="${TMPD}" boilerplate/update 71 | 72 | # Okay, if anything has changed, that's bad. 73 | if [[ $(git status --porcelain -- ':!build/Dockerfile*' | wc -l) -ne 0 ]]; then 74 | echo "Your boilerplate is dirty!" >&2 75 | git status --porcelain -- ':!build/Dockerfile*' 76 | exit 1 77 | fi 78 | 79 | echo "Your boilerplate is clean!" 80 | exit 0 81 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-propose-update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | usage() { 7 | cat < $config_dir/$config 54 | build_root: 55 | from_repository: true 56 | images: 57 | - dockerfile_path: build/Dockerfile 58 | to: unused 59 | resources: 60 | '*': 61 | limits: 62 | memory: 4Gi 63 | requests: 64 | cpu: 100m 65 | memory: 200Mi 66 | tests: 67 | - as: e2e-binary-build-success 68 | commands: | 69 | make e2e-binary-build 70 | container: 71 | from: src 72 | run_if_changed: ^(test/e2e/\.*|go\.mod|go\.sum)$ 73 | - as: coverage 74 | commands: | 75 | export CODECOV_TOKEN=\$(cat /tmp/secret/CODECOV_TOKEN) 76 | make coverage 77 | container: 78 | from: src 79 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 80 | secret: 81 | mount_path: /tmp/secret 82 | name: ${CONSUMER_NAME}-codecov-token 83 | - as: publish-coverage 84 | commands: | 85 | export CODECOV_TOKEN=\$(cat /tmp/secret/CODECOV_TOKEN) 86 | make coverage 87 | container: 88 | from: src 89 | postsubmit: true 90 | secret: 91 | mount_path: /tmp/secret 92 | name: ${CONSUMER_NAME}-codecov-token 93 | - as: lint 94 | commands: make lint 95 | container: 96 | from: src 97 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 98 | - as: test 99 | commands: make test 100 | container: 101 | from: src 102 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 103 | - as: validate 104 | commands: make validate 105 | container: 106 | from: src 107 | skip_if_only_changed: ^(?:\.tekton|\.github)|\.md$|^(?:\.gitignore|OWNERS|LICENSE)$ 108 | zz_generated_metadata: 109 | branch: ${DEFAULT_BRANCH} 110 | org: ${CONSUMER_ORG} 111 | repo: ${CONSUMER_NAME} 112 | EOF 113 | 114 | make jobs 115 | 116 | release_done_msg $release_branch 117 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/csv-generate/csv-generate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: staging-csv-build 2 | staging-csv-build: 3 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 4 | 5 | .PHONY: staging-catalog-build 6 | staging-catalog-build: 7 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c staging -r ${REGISTRY_IMAGE} 8 | 9 | 
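# Typical staging flow (OPERATOR_NAME, REGISTRY_IMAGE and friends are assumed
# to come from the including Makefile; see the aggregate target further down):
#   make staging-catalog-build-and-publish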
.PHONY: staging-saas-bundle-push 10 | staging-saas-bundle-push: 11 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 12 | 13 | .PHONY: staging-catalog-publish 14 | staging-catalog-publish: 15 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c staging -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 16 | 17 | .PHONY: staging-catalog-build-and-publish 18 | staging-catalog-build-and-publish: 19 | @$(MAKE) -s staging-csv-build --no-print-directory 20 | @$(MAKE) -s staging-catalog-build --no-print-directory 21 | @$(MAKE) -s staging-catalog-publish --no-print-directory 22 | 23 | .PHONY: production-hack-csv-build 24 | production-hack-csv-build: 25 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) -g hack 26 | 27 | .PHONY: production-csv-build 28 | production-csv-build: 29 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 30 | 31 | .PHONY: production-catalog-build 32 | production-catalog-build: 33 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c production -r ${REGISTRY_IMAGE} 34 | 35 | .PHONY: production-saas-bundle-push 36 | production-saas-bundle-push: 37 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 38 | 39 | .PHONY: production-catalog-publish 40 | production-catalog-publish: 41 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c production -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 42 | 43 | .PHONY: production-catalog-build-and-publish 44 | production-catalog-build-and-publish: 45 | @$(MAKE) -s production-csv-build --no-print-directory 46 | @$(MAKE) -s production-catalog-build --no-print-directory 47 | @$(MAKE) -s production-catalog-publish --no-print-directory 48 | 49 | .PHONY: stable-csv-build 50 | stable-csv-build: 51 | @${CONVENTION_DIR}/csv-generate/csv-generate.sh -o $(OPERATOR_NAME) -i $(OPERATOR_IMAGE) -V $(OPERATOR_VERSION) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -s $(SUPPLEMENTARY_IMAGE) -e $(SKIP_RANGE_ENABLED) 52 | 53 | .PHONY: stable-catalog-build 54 | stable-catalog-build: 55 | @${CONVENTION_DIR}/csv-generate/catalog-build.sh -o $(OPERATOR_NAME) -c stable -r ${REGISTRY_IMAGE} 56 | 57 | .PHONY: stable-saas-bundle-push 58 | stable-saas-bundle-push: 59 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -r ${REGISTRY_IMAGE} 60 | 61 | .PHONY: stable-catalog-publish 62 | stable-catalog-publish: 63 | @${CONVENTION_DIR}/csv-generate/catalog-publish.sh -o $(OPERATOR_NAME) -c stable -H $(CURRENT_COMMIT) -n $(COMMIT_NUMBER) -p -r ${REGISTRY_IMAGE} 64 | 65 | .PHONY: stable-catalog-build-and-publish 66 | stable-catalog-build-and-publish: 67 | @$(MAKE) -s stable-csv-build --no-print-directory 68 | @$(MAKE) -s stable-catalog-build --no-print-directory 69 | @$(MAKE) -s stable-catalog-publish --no-print-directory 70 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-report-release: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | source $REPO_ROOT/boilerplate/_lib/release.sh 6 | 7 | usage() { 8 | cat < $TMPD/$f 46 | echo $TMPD/$f 47 | return 48 | fi 49 | done 50 | } 51 | 52 | ## expected_prow_config ORG PROJ BRANCH 53 | # 54 | # Prints to stdout the expected prow configuration for the specified 55 | # ORG/PROJ. 56 | expected_prow_config() { 57 | local org=$1 58 | local consumer_name=$2 59 | local branch=$3 60 | # TODO: DRY this with what's in prow-config. 61 | # Do it by making it a template in the convention dir. 62 | cat < Generate Encrypted Password. 50 | # Even if you're not using quay, the pipeline expects these variables to 51 | # be named QUAY_* 52 | export QUAY_USER= 53 | export QUAY_TOKEN= 54 | 55 | # Tell the scripts where to find your fork of the SaaS bundle repository. 56 | # Except for the authentication part, this should correspond to what you see in the 57 | # https "clone" button in your fork. 58 | # Generate an access token via Settings => Access Tokens. Enable `write_repository`. 59 | # - {gitlab-user} is your username in gitlab 60 | # - {gitlab-token} is the authentication token you generated above 61 | # - {operator} is the name of the consumer repository, e.g. `deadmanssnitch-operator` 62 | export GIT_PATH=https://{gitlab-user}:{gitlab-token}@gitlab.cee.redhat.com/{gitlab-user}/saas-{operator}-bundle.git 63 | ``` 64 | 65 | ## Execute 66 | At this point you should be able to run 67 | ``` 68 | make build-push 69 | ``` 70 | 71 | This will create the following artifacts if it succeeds 72 | (`{hash}` is the 7-digit SHA of the current git commit in the repository under test): 73 | - Operator image in your personal operator repository, tagged `v{major}.{minor}.{commit-count}-{hash}` (e.g. `v0.1.228-e0b6129`) and `latest` 74 | - Two catalog images in your personal registry repository: 75 | - One image tagged `staging-{hash}` and `staging-latest` 76 | - The other tagged `production-{hash}` and `production-latest` 77 | - Two commits in your fork of the SaaS bundle repository: 78 | - One in the `staging` branch 79 | - The other in the `production` branch 80 | These are also present locally in a `saas-{operator-name}-bundle` subdirectory of your operator repository clone. 81 | You can inspect the artifacts therein to make sure e.g. the CSV was generated correctly. 82 | -------------------------------------------------------------------------------- /boilerplate/_lib/release.sh: -------------------------------------------------------------------------------- 1 | # Helpers and variables for dealing with openshift/release 2 | 3 | # NOTE: This library is sourced from user-run scripts. It should not be 4 | # sourced in CI, as it relies on git config that's not necessarily 5 | # present there. 6 | 7 | RELEASE_REPO=openshift/release 8 | 9 | ## Information about the boilerplate consumer 10 | # E.g. "openshift/my-wizbang-operator" 11 | CONSUMER=$(repo_name .) 12 | [[ -z "$CONSUMER" ]] && err " 13 | Failed to determine current repository name" 14 | # 15 | # E.g. "openshift" 16 | CONSUMER_ORG=${CONSUMER%/*} 17 | [[ -z "$CONSUMER_ORG" ]] && err " 18 | Failed to determine consumer org" 19 | # 20 | # E.g. "my-wizbang-operator" 21 | CONSUMER_NAME=${CONSUMER#*/} 22 | [[ -z "$CONSUMER_NAME" ]] && err " 23 | Failed to determine consumer name" 24 | # 25 | # E.g. 
"master" 26 | # This will produce something like refs/remotes/origin/master 27 | DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/upstream/HEAD 2>/dev/null || git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null || echo defaulting/to/master) 28 | # Strip off refs/remotes/{upstream|origin}/ 29 | DEFAULT_BRANCH=${DEFAULT_BRANCH##*/} 30 | [[ -z "$DEFAULT_BRANCH" ]] && err " 31 | Failed to determine default branch name" 32 | 33 | ## release_process_args "$@" 34 | # 35 | # This is for use by commands expecting one optional argument which is 36 | # the file system path to a clone of the $RELEASE_REPO. 37 | # 38 | # Will invoke `usage` -- which must be defined by the caller -- if 39 | # the wrong number of arguments are received, or if the single argument 40 | # is `help` or a flag. 41 | # 42 | # If exactly one argument is specified and it is valid, it is assigned 43 | # to the global RELEASE_CLONE variable. 44 | release_process_args() { 45 | if [[ $# -eq 1 ]]; then 46 | # Special cases for usage queries 47 | if [[ "$1" == '-'* ]] || [[ "$1" == help ]]; then 48 | usage 49 | fi 50 | 51 | [[ -d $1 ]] || err " 52 | $1: Not a directory." 53 | 54 | [[ $(repo_name $1) == "$RELEASE_REPO" ]] || err " 55 | $1 is not a clone of $RELEASE_REPO; or its 'origin' remote is not set properly." 56 | 57 | # Got a usable clone of openshift/release 58 | RELEASE_CLONE="$1" 59 | 60 | elif [[ $# -ne 0 ]]; then 61 | usage 62 | fi 63 | } 64 | 65 | ## release_validate_invocation 66 | # 67 | # Make sure we were called from a reasonable place, that being: 68 | # - A boilerplate consumer 69 | # - ...that's actually subscribed to a convention 70 | # - ...containing the script being invoked 71 | release_validate_invocation() { 72 | # Make sure we were invoked from a boilerplate consumer. 73 | [[ -z "$CONVENTION_NAME" ]] && err " 74 | $cmd must be invoked from a consumer of an appropriate convention. Where did you get this script from?" 75 | # Or at least not from boilerplate itself 76 | [[ "$CONSUMER" == "openshift/boilerplate" ]] && err " 77 | $cmd must be invoked from a boilerplate consumer, not from boilerplate itself." 78 | 79 | [[ -s $CONVENTION_ROOT/_data/last-boilerplate-commit ]] || err " 80 | $cmd must be invoked from a boilerplate consumer!" 81 | 82 | grep -E -q "^$CONVENTION_NAME(\s.*)?$" $CONVENTION_ROOT/update.cfg || err " 83 | $CONSUMER is not subscribed to $CONVENTION_NAME!" 84 | } 85 | 86 | ## release_prep_clone 87 | # 88 | # If $RELEASE_CLONE is already set: 89 | # - It should represent a directory containing a clean checkout of the 90 | # release repository; otherwise we error. 91 | # - We checkout and pull master. 92 | # Otherwise: 93 | # - We clone the release repo to a temporary directory. 94 | # - We set the $RELEASE_CLONE global variable to point to that 95 | # directory. 96 | release_prep_clone() { 97 | # If a release repo clone wasn't specified, create one 98 | if [[ -z "$RELEASE_CLONE" ]]; then 99 | RELEASE_CLONE=$(mktemp -dt openshift_release_XXXXXXX) 100 | git clone --depth=1 git@github.com:${RELEASE_REPO}.git $RELEASE_CLONE 101 | else 102 | [[ -z "$(git -C $RELEASE_CLONE status --porcelain)" ]] || err " 103 | Your release clone must start clean." 104 | # These will blow up if it's misconfigured 105 | git -C $RELEASE_CLONE checkout master 106 | git -C $RELEASE_CLONE pull 107 | fi 108 | } 109 | 110 | ## release_done_msg BRANCH 111 | # 112 | # Print exit instructions for submitting the release PR. 113 | # BRANCH is a suggested branch name. 
114 | release_done_msg() {
115 | echo
116 | git status
117 | 
118 | cat <&2; exit 1; }
8 | 
9 | while getopts "o:c:n:H:pr:" option; do
10 | case "${option}" in
11 | c)
12 | operator_channel=${OPTARG}
13 | ;;
14 | H)
15 | operator_commit_hash=${OPTARG}
16 | ;;
17 | n)
18 | operator_commit_number=${OPTARG}
19 | ;;
20 | o)
21 | operator_name=${OPTARG}
22 | ;;
23 | p)
24 | push_catalog=true
25 | ;;
26 | r)
27 | # NOTE: This is the URL without the tag/digest
28 | registry_image=${OPTARG}
29 | ;;
30 | *)
31 | usage
32 | esac
33 | done
34 | 
35 | # Checking parameters
36 | check_mandatory_params operator_channel operator_name operator_commit_hash operator_commit_number registry_image
37 | 
38 | # Calculate previous version
39 | SAAS_OPERATOR_DIR="saas-${operator_name}-bundle"
40 | BUNDLE_DIR="${SAAS_OPERATOR_DIR}/${operator_name}"
41 | OPERATOR_NEW_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 1)
42 | OPERATOR_PREV_VERSION=$(ls "${BUNDLE_DIR}" | sort -t . -k 3 -g | tail -n 2 | head -n 1)
43 | 
44 | if [[ "$OPERATOR_NEW_VERSION" == "$OPERATOR_PREV_VERSION" ]]; then
45 | echo "New version and previous version are identical. Exiting."
46 | exit 1
47 | fi
48 | 
49 | # Get container engine
50 | CONTAINER_ENGINE=$(command -v podman || command -v docker || true)
51 | [[ -n "$CONTAINER_ENGINE" ]] || echo "WARNING: Couldn't find a container engine. Assuming you're already in a container, running unit tests." >&2
52 | 
53 | # Set SRC container transport based on container engine
54 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]]; then
55 | SRC_CONTAINER_TRANSPORT="containers-storage"
56 | else
57 | SRC_CONTAINER_TRANSPORT="docker-daemon"
58 | fi
59 | 
60 | # Check that SAAS_OPERATOR_DIR exists and is a git repository
61 | if [ ! -d "${SAAS_OPERATOR_DIR}/.git" ] ; then
62 | echo "${SAAS_OPERATOR_DIR} should exist and be a git repository"
63 | exit 1
64 | fi
65 | 
66 | # Read the bundle version we're attempting to publish
67 | # in the OLM catalog from the package yaml
68 | PACKAGE_YAML_PATH="${BUNDLE_DIR}/${operator_name}.package.yaml"
69 | PACKAGE_YAML_VERSION=$(awk '$1 == "currentCSV:" {print $2}' ${PACKAGE_YAML_PATH})
70 | 
71 | # Ensure we're committing and pushing the version we think we are pushing.
72 | # Since we build the bundle in catalog-build.sh this script could be run
73 | # independently and push a version we're not expecting.
74 | # if ! [ "${operator_name}.v${OPERATOR_NEW_VERSION}" = "${PACKAGE_YAML_VERSION}" ]; then
75 | # echo "You are attempting to push a bundle that's pointing to a version of this catalog you are not building"
76 | # echo "You are building version: ${operator_name}.v${OPERATOR_NEW_VERSION}"
77 | # echo "Your local package yaml version is: ${PACKAGE_YAML_VERSION}"
78 | # exit 1
79 | # fi
80 | 
81 | # add, commit & push
82 | pushd "${SAAS_OPERATOR_DIR}"
83 | 
84 | git add .
85 | 
86 | MESSAGE="add version ${operator_commit_number}-${operator_commit_hash}
87 | 
88 | replaces ${OPERATOR_PREV_VERSION}
89 | removed versions: ${REMOVED_VERSIONS}"
90 | 
91 | git commit -m "${MESSAGE}"
92 | git push origin HEAD
93 | 
94 | if [ $? -ne 0 ] ; then
95 | echo "git push failed, exiting..."
96 | exit 1
97 | fi
98 | 
99 | popd
100 | 
101 | if [ "$push_catalog" = true ] ; then
102 | # push image
103 | if [[ "${RELEASE_BRANCHED_BUILDS}" ]]; then
104 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \
105 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:v${OPERATOR_NEW_VERSION}" \
106 | "docker://${registry_image}:v${OPERATOR_NEW_VERSION}"
107 | 
108 | if [ $?
-ne 0 ] ; then 109 | echo "skopeo push of ${registry_image}:v${OPERATOR_NEW_VERSION}-latest failed, exiting..." 110 | exit 1 111 | fi 112 | 113 | exit 0 114 | fi 115 | 116 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 117 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:${operator_channel}-latest" \ 118 | "docker://${registry_image}:${operator_channel}-latest" 119 | 120 | if [ $? -ne 0 ] ; then 121 | echo "skopeo push of ${registry_image}:${operator_channel}-latest failed, exiting..." 122 | exit 1 123 | fi 124 | 125 | skopeo copy --dest-creds "${QUAY_USER}:${QUAY_TOKEN}" \ 126 | "${SRC_CONTAINER_TRANSPORT}:${registry_image}:${operator_channel}-latest" \ 127 | "docker://${registry_image}:${operator_channel}-${operator_commit_hash}" 128 | 129 | if [ $? -ne 0 ] ; then 130 | echo "skopeo push of ${registry_image}:${operator_channel}-${operator_commit_hash} failed, exiting..." 131 | exit 1 132 | fi 133 | fi 134 | -------------------------------------------------------------------------------- /pkg/dmsclient/mock/mock_dmsclient.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: pkg/dmsclient/dmsclient.go 3 | 4 | // Package mock_dmsclient is a generated GoMock package. 5 | package mock_dmsclient 6 | 7 | import ( 8 | gomock "github.com/golang/mock/gomock" 9 | dmsclient "github.com/openshift/deadmanssnitch-operator/pkg/dmsclient" 10 | reflect "reflect" 11 | ) 12 | 13 | // MockClient is a mock of Client interface 14 | type MockClient struct { 15 | ctrl *gomock.Controller 16 | recorder *MockClientMockRecorder 17 | } 18 | 19 | // MockClientMockRecorder is the mock recorder for MockClient 20 | type MockClientMockRecorder struct { 21 | mock *MockClient 22 | } 23 | 24 | // NewMockClient creates a new mock instance 25 | func NewMockClient(ctrl *gomock.Controller) *MockClient { 26 | mock := &MockClient{ctrl: ctrl} 27 | mock.recorder = &MockClientMockRecorder{mock} 28 | return mock 29 | } 30 | 31 | // EXPECT returns an object that allows the caller to indicate expected use 32 | func (m *MockClient) EXPECT() *MockClientMockRecorder { 33 | return m.recorder 34 | } 35 | 36 | // ListAll mocks base method 37 | func (m *MockClient) ListAll() ([]dmsclient.Snitch, error) { 38 | m.ctrl.T.Helper() 39 | ret := m.ctrl.Call(m, "ListAll") 40 | ret0, _ := ret[0].([]dmsclient.Snitch) 41 | ret1, _ := ret[1].(error) 42 | return ret0, ret1 43 | } 44 | 45 | // ListAll indicates an expected call of ListAll 46 | func (mr *MockClientMockRecorder) ListAll() *gomock.Call { 47 | mr.mock.ctrl.T.Helper() 48 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListAll", reflect.TypeOf((*MockClient)(nil).ListAll)) 49 | } 50 | 51 | // List mocks base method 52 | func (m *MockClient) List(snitchToken string) (dmsclient.Snitch, error) { 53 | m.ctrl.T.Helper() 54 | ret := m.ctrl.Call(m, "List", snitchToken) 55 | ret0, _ := ret[0].(dmsclient.Snitch) 56 | ret1, _ := ret[1].(error) 57 | return ret0, ret1 58 | } 59 | 60 | // List indicates an expected call of List 61 | func (mr *MockClientMockRecorder) List(snitchToken interface{}) *gomock.Call { 62 | mr.mock.ctrl.T.Helper() 63 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockClient)(nil).List), snitchToken) 64 | } 65 | 66 | // Create mocks base method 67 | func (m *MockClient) Create(newSnitch dmsclient.Snitch) (dmsclient.Snitch, error) { 68 | m.ctrl.T.Helper() 69 | ret := m.ctrl.Call(m, "Create", newSnitch) 70 | ret0, _ := ret[0].(dmsclient.Snitch) 
71 | ret1, _ := ret[1].(error) 72 | return ret0, ret1 73 | } 74 | 75 | // Create indicates an expected call of Create 76 | func (mr *MockClientMockRecorder) Create(newSnitch interface{}) *gomock.Call { 77 | mr.mock.ctrl.T.Helper() 78 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockClient)(nil).Create), newSnitch) 79 | } 80 | 81 | // Delete mocks base method 82 | func (m *MockClient) Delete(snitchToken string) (bool, error) { 83 | m.ctrl.T.Helper() 84 | ret := m.ctrl.Call(m, "Delete", snitchToken) 85 | ret0, _ := ret[0].(bool) 86 | ret1, _ := ret[1].(error) 87 | return ret0, ret1 88 | } 89 | 90 | // Delete indicates an expected call of Delete 91 | func (mr *MockClientMockRecorder) Delete(snitchToken interface{}) *gomock.Call { 92 | mr.mock.ctrl.T.Helper() 93 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockClient)(nil).Delete), snitchToken) 94 | } 95 | 96 | // FindSnitchesByName mocks base method 97 | func (m *MockClient) FindSnitchesByName(snitchName string) ([]dmsclient.Snitch, error) { 98 | m.ctrl.T.Helper() 99 | ret := m.ctrl.Call(m, "FindSnitchesByName", snitchName) 100 | ret0, _ := ret[0].([]dmsclient.Snitch) 101 | ret1, _ := ret[1].(error) 102 | return ret0, ret1 103 | } 104 | 105 | // FindSnitchesByName indicates an expected call of FindSnitchesByName 106 | func (mr *MockClientMockRecorder) FindSnitchesByName(snitchName interface{}) *gomock.Call { 107 | mr.mock.ctrl.T.Helper() 108 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindSnitchesByName", reflect.TypeOf((*MockClient)(nil).FindSnitchesByName), snitchName) 109 | } 110 | 111 | // Update mocks base method 112 | func (m *MockClient) Update(updateSnitch dmsclient.Snitch) (dmsclient.Snitch, error) { 113 | m.ctrl.T.Helper() 114 | ret := m.ctrl.Call(m, "Update", updateSnitch) 115 | ret0, _ := ret[0].(dmsclient.Snitch) 116 | ret1, _ := ret[1].(error) 117 | return ret0, ret1 118 | } 119 | 120 | // Update indicates an expected call of Update 121 | func (mr *MockClientMockRecorder) Update(updateSnitch interface{}) *gomock.Call { 122 | mr.mock.ctrl.T.Helper() 123 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Update", reflect.TypeOf((*MockClient)(nil).Update), updateSnitch) 124 | } 125 | 126 | // CheckIn mocks base method 127 | func (m *MockClient) CheckIn(s dmsclient.Snitch) error { 128 | m.ctrl.T.Helper() 129 | ret := m.ctrl.Call(m, "CheckIn", s) 130 | ret0, _ := ret[0].(error) 131 | return ret0 132 | } 133 | 134 | // CheckIn indicates an expected call of CheckIn 135 | func (mr *MockClientMockRecorder) CheckIn(s interface{}) *gomock.Call { 136 | mr.mock.ctrl.T.Helper() 137 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckIn", reflect.TypeOf((*MockClient)(nil).CheckIn), s) 138 | } 139 | -------------------------------------------------------------------------------- /hack/olm-registry/olm-artifacts-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Template 3 | metadata: 4 | name: olm-artifacts-template 5 | 6 | parameters: 7 | - name: REGISTRY_IMG 8 | required: true 9 | - name: CHANNEL 10 | value: staging 11 | - name: IMAGE_TAG 12 | value: latest 13 | - name: REPO_DIGEST 14 | value: latest 15 | - name: SILENT_ALERT_LEGALENTITY_IDS 16 | value: '["None"]' 17 | - name: DEADMANSSNITCH_OSD_TAGS 18 | required: true 19 | - name: FEDRAMP 20 | value: "false" 21 | 22 | objects: 23 | - apiVersion: operators.coreos.com/v1alpha1 24 | kind: CatalogSource 25 | metadata: 
26 | name: deadmanssnitch-operator-catalog 27 | spec: 28 | sourceType: grpc 29 | grpcPodConfig: 30 | securityContextConfig: restricted 31 | image: ${REPO_DIGEST} 32 | displayName: deadmanssnitch-operator Registry 33 | publisher: SRE 34 | 35 | - apiVersion: operators.coreos.com/v1alpha2 36 | kind: OperatorGroup 37 | metadata: 38 | name: deadmanssnitch-operator-og 39 | spec: 40 | targetNamespaces: 41 | - deadmanssnitch-operator 42 | 43 | - apiVersion: operators.coreos.com/v1alpha1 44 | kind: Subscription 45 | metadata: 46 | name: deadmanssnitch-operator 47 | spec: 48 | channel: ${CHANNEL} 49 | name: deadmanssnitch-operator 50 | source: deadmanssnitch-operator-catalog 51 | sourceNamespace: deadmanssnitch-operator 52 | config: 53 | env: 54 | - name: FEDRAMP 55 | value: "${FEDRAMP}" 56 | 57 | - apiVersion: deadmanssnitch.managed.openshift.io/v1alpha1 58 | kind: DeadmansSnitchIntegration 59 | metadata: 60 | name: osd 61 | spec: 62 | snitchNamePostFix: "" 63 | dmsAPIKeySecretRef: 64 | name: deadmanssnitch-api-key 65 | namespace: deadmanssnitch-operator 66 | clusterDeploymentSelector: 67 | matchExpressions: 68 | # only create DMS service for managed (OSD) clusters 69 | - key: api.openshift.com/managed 70 | operator: In 71 | values: ["true"] 72 | # ignore CD w/ "legacy" noalerts label 73 | - key: api.openshift.com/noalerts 74 | operator: NotIn 75 | values: ["true"] 76 | # ignore CD w/ ext noalerts label 77 | - key: ext-managed.openshift.io/noalerts 78 | operator: NotIn 79 | values: ["true"] 80 | # ignore CD for specific organizations 81 | - key: api.openshift.com/legal-entity-id 82 | operator: NotIn 83 | values: ${{SILENT_ALERT_LEGALENTITY_IDS}} 84 | # ignore CD for any "nightly" clusters 85 | - key: api.openshift.com/channel-group 86 | operator: NotIn 87 | values: ["nightly"] 88 | - key: api.openshift.com/fedramp 89 | operator: NotIn 90 | values: ["true"] 91 | # Ignore CD if cluster is in limited support 92 | - key: api.openshift.com/limited-support 93 | operator: NotIn 94 | values: ["true"] 95 | # ignore CD if cluster has a support exception, 96 | # as we have a dedicated dms integration for supportex that ignores limited support 97 | - key: ext-managed.openshift.io/support-exception 98 | operator: NotIn 99 | values: ["true"] 100 | targetSecretRef: 101 | name: dms-secret 102 | namespace: openshift-monitoring 103 | tags: ${{DEADMANSSNITCH_OSD_TAGS}} 104 | clusterDeploymentAnnotationsToSkip: 105 | - name: hive.openshift.io/fake-cluster 106 | value: "true" 107 | - name: managed.openshift.com/fake 108 | value: "true" 109 | 110 | - apiVersion: deadmanssnitch.managed.openshift.io/v1alpha1 111 | kind: DeadmansSnitchIntegration 112 | metadata: 113 | name: osd-supportex 114 | spec: 115 | snitchNamePostFix: "" 116 | dmsAPIKeySecretRef: 117 | name: deadmanssnitch-api-key 118 | namespace: deadmanssnitch-operator 119 | clusterDeploymentSelector: 120 | matchExpressions: 121 | # only create DMS service for managed (OSD) clusters 122 | - key: api.openshift.com/managed 123 | operator: In 124 | values: ["true"] 125 | # ignore CD w/ "legacy" noalerts label 126 | - key: api.openshift.com/noalerts 127 | operator: NotIn 128 | values: ["true"] 129 | # ignore CD w/ ext noalerts label 130 | - key: ext-managed.openshift.io/noalerts 131 | operator: NotIn 132 | values: ["true"] 133 | # ignore CD for specific organizations 134 | - key: api.openshift.com/legal-entity-id 135 | operator: NotIn 136 | values: ${{SILENT_ALERT_LEGALENTITY_IDS}} 137 | # ignore CD for any "nightly" clusters 138 | - key: 
api.openshift.com/channel-group 139 | operator: NotIn 140 | values: ["nightly"] 141 | - key: api.openshift.com/fedramp 142 | operator: NotIn 143 | values: ["true"] 144 | # Only create DMS service for clusters with a support exception 145 | - key: ext-managed.openshift.io/support-exception 146 | operator: In 147 | values: ["true"] 148 | targetSecretRef: 149 | name: dms-secret 150 | namespace: openshift-monitoring 151 | tags: ${{DEADMANSSNITCH_OSD_TAGS}} 152 | clusterDeploymentAnnotationsToSkip: 153 | - name: hive.openshift.io/fake-cluster 154 | value: "true" 155 | - name: managed.openshift.com/fake 156 | value: "true" 157 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber.sh: -------------------------------------------------------------------------------- 1 | # Helpers and variables for subscriber automation 2 | # 3 | # Source this file from subscriber[-*]. 4 | # 5 | # If your command has subcommands, define SUBCOMMANDS as a map of 6 | # [subcmd_name]='Description one-liner' *before* sourcing this library 7 | # and it will parse the command line up to that point for you, setting 8 | # the SUBCOMMAND variable and leaving everything else in $@. No explicit 9 | # usage function is necessary. 10 | # 11 | # Otherwise, define your usage() function *before* sourcing this library 12 | # and it will handle variants of [-[-]]h[elp] for you. 13 | 14 | CMD=${SOURCER##*/} 15 | 16 | _subcommand_usage() { 17 | echo "Usage: $CMD SUBCOMMAND ..." 18 | for subcommand in "${!SUBCOMMANDS[@]}"; do 19 | echo 20 | echo "===========" 21 | echo "$CMD $subcommand" 22 | echo " ${SUBCOMMANDS[$subcommand]}" 23 | done 24 | exit -1 25 | } 26 | 27 | # Regex for help, -h, -help, --help, etc. 28 | # NOTE: This will match a raw 'h'. That's probably okay, since if 29 | # there's a conflict, 'h' would be ambiguous anyway. 30 | _helpre='^-*h(elp)?$' 31 | 32 | # Subcommand processing 33 | if [[ ${#SUBCOMMANDS[@]} -ne 0 ]]; then 34 | 35 | # No subcommand specified 36 | [[ $# -eq 0 ]] && _subcommand_usage 37 | 38 | subcmd=$1 39 | shift 40 | 41 | [[ "$subcmd" =~ $_helpre ]] && _subcommand_usage 42 | 43 | # Allow unique prefixes 44 | SUBCOMMAND= 45 | for key in "${!SUBCOMMANDS[@]}"; do 46 | if [[ $key == "$subcmd"* ]]; then 47 | # If SUBCOMMAND is already set, this is an ambiguous prefix. 48 | if [[ -n "$SUBCOMMAND" ]]; then 49 | err "Ambiguous subcommand prefix: '$subcmd' matches (at least): ['$SUBCOMMAND', '$key']" 50 | fi 51 | SUBCOMMAND=$key 52 | fi 53 | done 54 | [[ -n "$SUBCOMMAND" ]] || err "Unknown subcommand '$subcmd'. Try 'help' for usage." 55 | 56 | # We got a valid, unique subcommand. Run the helper with the remaining CLI args. 57 | exec $HERE/$CMD-$SUBCOMMAND "$@" 58 | fi 59 | 60 | [[ "$1" =~ $_helpre ]] && usage 61 | 62 | SUBSCRIBERS_FILE=$REPO_ROOT/subscribers.yaml 63 | 64 | ## subscriber_list FILTER 65 | # 66 | # Prints a list of subscribers registered in the $SUBSCRIBERS_FILE. 67 | # 68 | # FILTER: 69 | # all: Prints all subscribers 70 | # onboarded: Prints only onboarded subscribers 71 | subscriber_list() { 72 | case $1 in 73 | all) yq '.subscribers[] | .name' $SUBSCRIBERS_FILE;; 74 | # TODO: Right now subscribers are only "manual". 75 | onboarded) yq '.subscribers[] | select(.conventions[].status == "manual") | .name' $SUBSCRIBERS_FILE;; 76 | esac 77 | } 78 | 79 | ## last_bp_commit ORG/PROJ 80 | # 81 | # Prints the commit hash of the specified repository's boilerplate 82 | # level, or the empty string if the repository is not onboarded. 
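# (Illustrative, hypothetical call: `last_bp_commit openshift/some-operator`
# prints the first 7 characters of that repo's
# boilerplate/_data/last-boilerplate-commit, fetched from its default branch.)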
83 | # 84 | # ORG/PROJ: github organization and project name, e.g. 85 | # "openshift/my-wizbang-operator". 86 | last_bp_commit() { 87 | local repo=$1 88 | local lbc 89 | for default_branch in master main; do 90 | lbc=$(curl -s https://raw.githubusercontent.com/$repo/$default_branch/boilerplate/_data/last-boilerplate-commit) 91 | if [[ "$lbc" != "404: Not Found" ]]; then 92 | echo $lbc | cut -c 1-7 93 | return 94 | fi 95 | done 96 | } 97 | 98 | ## commits_behind_bp_master HASH 99 | # 100 | # Prints how many merge commits behind boilerplate master HASH is. If 101 | # HASH is empty/unspecified, prints the total number of merge commits in 102 | # the boilerplate repo. 103 | commits_behind_bp_master() { 104 | local hash=$1 105 | local range=master 106 | if [[ -n "$hash" ]]; then 107 | range=$hash..master 108 | fi 109 | git rev-list --count --merges $range 110 | } 111 | 112 | ## subscriber_args SUBSCRIBER ... 113 | # 114 | # Processes arguments as a list of onboarded subscribers of the form 115 | # "org/name" (e.g. "openshift/deadmanssnitch-operator"); or the special 116 | # keyword "ALL". 117 | # 118 | # Outputs to stderr a space-separated list of subscribers. If "ALL" was 119 | # specified, these are all onboarded subscribers. 120 | # 121 | # Errors if: 122 | # - "ALL" is specified along with one or more explicit subscriber names. 123 | # - Any specified subscriber is nonexistent or not listed as onboarded 124 | # in the config. 125 | subscriber_args() { 126 | local -A to_process 127 | local ALL=0 128 | local subscriber 129 | local a 130 | 131 | if [[ $# -eq 1 ]] && [[ "$1" == ALL ]]; then 132 | ALL=1 133 | shift 134 | fi 135 | for subscriber in $(subscriber_list onboarded); do 136 | to_process[$subscriber]=$ALL 137 | done 138 | 139 | # Parse specified subscribers 140 | for a in "$@"; do 141 | [[ $a == ALL ]] && err "Can't specify ALL with explicit subscribers" 142 | 143 | [[ -n "${to_process[$a]}" ]] || err "Not an onboarded subscriber: '$a'" 144 | if [[ "${to_process[$a]}" -eq 1 ]]; then 145 | echo "Ignoring duplicate: '$a'" >&2 146 | continue 147 | fi 148 | to_process[$a]=1 149 | done 150 | 151 | for subscriber in "${!to_process[@]}"; do 152 | [[ "${to_process[$subscriber]}" -eq 1 ]] || continue 153 | echo -n "${subscriber} " 154 | done 155 | } 156 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "context" 21 | "flag" 22 | "fmt" 23 | "os" 24 | "runtime" 25 | 26 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 27 | // to ensure that exec-entrypoint and run can make use of them. 
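// (The blank-identifier auth import below has no direct API; it works via
// init() side effects that register the cloud credential providers with
// client-go.)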
28 | "github.com/operator-framework/operator-lib/leader" 29 | _ "k8s.io/client-go/plugin/pkg/client/auth" 30 | 31 | hivev1 "github.com/openshift/hive/apis/hive/v1" 32 | "github.com/openshift/operator-custom-metrics/pkg/metrics" 33 | 34 | "go.uber.org/zap/zapcore" 35 | k8sruntime "k8s.io/apimachinery/pkg/runtime" 36 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 37 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 38 | ctrl "sigs.k8s.io/controller-runtime" 39 | "sigs.k8s.io/controller-runtime/pkg/healthz" 40 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 41 | 42 | deadmanssnitchv1alpha1 "github.com/openshift/deadmanssnitch-operator/api/v1alpha1" 43 | operatorconfig "github.com/openshift/deadmanssnitch-operator/config" 44 | controllers "github.com/openshift/deadmanssnitch-operator/controllers/deadmanssnitchintegration" 45 | "github.com/openshift/deadmanssnitch-operator/pkg/localmetrics" 46 | //+kubebuilder:scaffold:imports 47 | ) 48 | 49 | var ( 50 | scheme = k8sruntime.NewScheme() 51 | setupLog = ctrl.Log.WithName("setup") 52 | metricsPath = "/metrics" 53 | // metricsPort the port on which metrics is hosted, don't pick one that's already used 54 | metricsPort = "8081" 55 | ) 56 | 57 | func init() { 58 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 59 | 60 | utilruntime.Must(hivev1.AddToScheme(scheme)) 61 | utilruntime.Must(deadmanssnitchv1alpha1.AddToScheme(scheme)) 62 | //+kubebuilder:scaffold:scheme 63 | } 64 | 65 | func printVersion() { 66 | setupLog.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) 67 | setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) 68 | } 69 | 70 | func main() { 71 | var enableLeaderElection bool 72 | var probeAddr string 73 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8080", "The address the probe endpoint binds to.") 74 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 75 | "Enable leader election for controller manager. 
"+ 76 | "Enabling this will ensure there is only one active controller manager.") 77 | opts := zap.Options{ 78 | Development: false, 79 | TimeEncoder: zapcore.RFC3339TimeEncoder, 80 | StacktraceLevel: zapcore.DPanicLevel, 81 | } 82 | opts.BindFlags(flag.CommandLine) 83 | flag.Parse() 84 | 85 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 86 | 87 | // Print configuration info 88 | printVersion() 89 | if err := operatorconfig.SetIsFedramp(); err != nil { 90 | setupLog.Error(err, "failed to get fedramp value") 91 | os.Exit(1) 92 | } 93 | if operatorconfig.IsFedramp() { 94 | setupLog.Info("running in fedramp environment.") 95 | } 96 | 97 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 98 | Scheme: scheme, 99 | MetricsBindAddress: "0", // disable the controller-runtime metrics 100 | Port: 9443, 101 | HealthProbeBindAddress: probeAddr, 102 | LeaderElection: enableLeaderElection, 103 | LeaderElectionID: "3a94b80a.deadmanssnitch.managed.openshift.io", 104 | }) 105 | if err != nil { 106 | setupLog.Error(err, "unable to start manager") 107 | os.Exit(1) 108 | } 109 | 110 | err = leader.Become(context.TODO(), "deadmanssnitch-operator-lock") 111 | if err != nil { 112 | setupLog.Error(err, "Failed to retry for leader lock") 113 | os.Exit(1) 114 | } 115 | 116 | if err = (&controllers.DeadmansSnitchIntegrationReconciler{ 117 | Client: mgr.GetClient(), 118 | Scheme: mgr.GetScheme(), 119 | }).SetupWithManager(mgr); err != nil { 120 | setupLog.Error(err, "unable to create controller", "controller", "DeadmansSnitchIntegration") 121 | os.Exit(1) 122 | } 123 | //+kubebuilder:scaffold:builder 124 | 125 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 126 | setupLog.Error(err, "unable to set up health check") 127 | os.Exit(1) 128 | } 129 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 130 | setupLog.Error(err, "unable to set up ready check") 131 | os.Exit(1) 132 | } 133 | 134 | // Configure custom metrics 135 | localmetrics.Collector = localmetrics.NewMetricsCollector() 136 | metricsServer := metrics.NewBuilder(operatorconfig.OperatorNamespace, operatorconfig.OperatorName). 137 | WithPort(metricsPort). 138 | WithPath(metricsPath). 139 | WithCollector(localmetrics.Collector). 140 | WithRoute(). 141 | GetConfig() 142 | 143 | if err := metrics.ConfigureMetrics(context.TODO(), *metricsServer); err != nil { 144 | setupLog.Error(err, "failed to configure custom metrics") 145 | os.Exit(1) 146 | } 147 | 148 | setupLog.Info("starting manager") 149 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 150 | setupLog.Error(err, "problem running manager") 151 | os.Exit(1) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $CONVENTION_ROOT/_lib/common.sh 6 | 7 | # No PRE 8 | [[ "$1" == "PRE" ]] && exit 0 9 | 10 | # Expect POST 11 | [[ "$1" == "POST" ]] || err "Got a parameter I don't understand: '$1'. Did the infrastructure change?" 12 | 13 | # Add codecov configuration 14 | echo "Copying .codecov.yml to your repository root." 15 | cp ${HERE}/.codecov.yml $REPO_ROOT 16 | 17 | # Add OWNERS_ALIASES to $REPO_ROOT 18 | echo "Copying OWNERS_ALIASES to your repository root." 
19 | cp -L ${HERE}/OWNERS_ALIASES $REPO_ROOT 20 | 21 | # Add CICD owners to .tekton if exists 22 | if [[ -d "${REPO_ROOT}/.tekton/" ]]; then 23 | echo "Adding Konflux subdirectory OWNERS file to .tekton/" 24 | cat >"${REPO_ROOT}/.tekton/OWNERS" </dev/null; then 41 | echo "Wrapping existing dependabot.yml (which matches boilerplate) with boilerplate-managed markers..." 42 | mv "$TARGET_FILE" "${TARGET_FILE}.bak" 43 | { 44 | echo "# BEGIN boilerplate-managed" 45 | cat "${TARGET_FILE}.bak" 46 | echo "# END boilerplate-managed" 47 | } > "$TARGET_FILE" 48 | rm -f "${TARGET_FILE}.bak" 49 | else 50 | echo "[WARNING] dependabot.yml exists and differs from boilerplate template but has no boilerplate-managed markers." 51 | echo "[WARNING] Please review manually to avoid config duplication." 52 | fi 53 | else 54 | echo "Copying boilerplate-managed dependabot.yml" 55 | cp "$BOILERPLATE_FILE" "$TARGET_FILE" 56 | fi 57 | 58 | # Add olm-registry Dockerfile 59 | mkdir -p $REPO_ROOT/build 60 | echo "Copying Dockerfile.olm-registry to build/Dockerfile.olm-registry" 61 | cp ${HERE}/Dockerfile.olm-registry ${REPO_ROOT}/build/Dockerfile.olm-registry 62 | # if the gitignore file exists, remove the olm-registry line 63 | if [[ -f ${REPO_ROOT}/.gitignore ]]; then 64 | ${SED?} -i "/Dockerfile.olm-registry/d" ${REPO_ROOT}/.gitignore 65 | fi 66 | 67 | OPERATOR_NAME=$(sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' "${REPO_ROOT}/config/config.go") 68 | 69 | if [[ ! -f ${REPO_ROOT}/config/metadata/additional-labels.txt ]]; then 70 | mkdir -p ${REPO_ROOT}/config/metadata 71 | cat >${REPO_ROOT}/config/metadata/additional-labels.txt <$REPO_ROOT/.ci-operator.yaml 112 | 113 | # Check for pipeline files in .tekton directory and centralize them 114 | TEKTON_DIR="${REPO_ROOT}/.tekton" 115 | if [ -d "$TEKTON_DIR" ]; then 116 | for pipeline_file in "$TEKTON_DIR"/*.yaml; do 117 | if [ -f "$pipeline_file" ] && grep -q buildah "${pipeline_file}" && ! grep -q "pipelineRef:" "$pipeline_file"; then 118 | echo "Centralizing pipeline: $(basename "$pipeline_file")" 119 | python3 "${HERE}/migrate_build_pipeline.py" "${pipeline_file}" 120 | fi 121 | done 122 | fi 123 | 124 | cat < core/$version/... 139 | tokens[0] = "core" 140 | case "apis": 141 | // Extensions: /apis/$group/$version/... 142 | // => $group/$version/... 143 | tokens = tokens[1:] 144 | default: 145 | // Something else. Punt. 146 | panic(1) 147 | } 148 | 149 | // Single resource, non-namespaced (including a namespace itself): $group/$version/$kind/$name 150 | if len(tokens) == 4 { 151 | // Factor out the resource name 152 | tokens[3] = "{NAME}" 153 | } 154 | 155 | // Kind or single resource, namespaced: $group/$version/namespaces/$nsname/$kind[/$name[/...]] 156 | if len(tokens) > 4 && tokens[2] == "namespaces" { 157 | // Factor out the namespace name 158 | tokens[3] = "{NAMESPACE}" 159 | 160 | // Single resource, namespaced: $group/$version/namespaces/$nsname/$kind/$name[/...] 
161 | if len(tokens) > 5 { 162 | // Factor out the resource name 163 | tokens[5] = "{NAME}" 164 | } 165 | } 166 | 167 | resource = strings.Join(tokens, "/") 168 | 169 | return 170 | } 171 | -------------------------------------------------------------------------------- /pkg/dmsclient/dmsclient.go: -------------------------------------------------------------------------------- 1 | package dmsclient 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "net/http" 9 | "net/url" 10 | "time" 11 | 12 | "github.com/openshift/deadmanssnitch-operator/pkg/localmetrics" 13 | ) 14 | 15 | const ( 16 | apiEndpoint = "https://api.deadmanssnitch.com/v1" 17 | ) 18 | 19 | // Client is a wrapper interface for the dmsClient to allow for easier testing 20 | type Client interface { 21 | ListAll() ([]Snitch, error) 22 | List(snitchToken string) (Snitch, error) 23 | Create(newSnitch Snitch) (Snitch, error) 24 | Delete(snitchToken string) (bool, error) 25 | FindSnitchesByName(snitchName string) ([]Snitch, error) 26 | Update(updateSnitch Snitch) (Snitch, error) 27 | CheckIn(s Snitch) error 28 | } 29 | 30 | // SnitchType Struct 31 | type SnitchType struct { 32 | Interval string `json:"interval"` 33 | } 34 | 35 | // Snitch Struct 36 | type Snitch struct { 37 | Name string `json:"name"` 38 | Token string `json:"token"` 39 | Href string `json:"href"` 40 | Tags []string `json:"tags"` 41 | Notes string `json:"notes"` 42 | Status string `json:"status"` 43 | CheckedInAt string `json:"checked_in_at"` 44 | CheckInURL string `json:"check_in_url"` 45 | CreatedAt string `json:"created_at"` 46 | Interval string `json:"interval"` 47 | AlertType string `json:"alert_type"` 48 | AlertEmail []string `json:"alert_email"` 49 | Type SnitchType `json:"type"` 50 | } 51 | 52 | func defaultURL() *url.URL { 53 | url, _ := url.Parse(apiEndpoint) 54 | return url 55 | } 56 | 57 | // Client wraps http client 58 | type dmsClient struct { 59 | authToken string 60 | BaseURL *url.URL 61 | httpClient *http.Client 62 | metricsCollector *localmetrics.MetricsCollector 63 | } 64 | 65 | // NewClient creates an API client 66 | func NewClient(authToken string, collector *localmetrics.MetricsCollector) Client { 67 | return &dmsClient{ 68 | authToken: authToken, 69 | BaseURL: defaultURL(), 70 | httpClient: http.DefaultClient, 71 | metricsCollector: collector, 72 | } 73 | } 74 | 75 | // NewSnitch creates a new Snitch only requiring a few items 76 | func NewSnitch(name string, tags []string, interval string, alertType string) Snitch { 77 | return Snitch{ 78 | Name: name, 79 | Tags: tags, 80 | Interval: interval, 81 | AlertType: alertType, 82 | } 83 | } 84 | 85 | func (c *dmsClient) newRequest(method, path string, body interface{}) (*http.Request, error) { 86 | rel := &url.URL{Path: path} 87 | u := c.BaseURL.ResolveReference(rel) 88 | var buf io.ReadWriter 89 | 90 | if body != nil { 91 | buf = new(bytes.Buffer) 92 | err := json.NewEncoder(buf).Encode(body) 93 | if err != nil { 94 | return nil, err 95 | } 96 | } 97 | req, err := http.NewRequest(method, u.String(), buf) 98 | if err != nil { 99 | return nil, err 100 | } 101 | if body != nil { 102 | req.Header.Set("Content-Type", "application/json") 103 | } 104 | req.Header.Set("Accept", "application/json") 105 | req.Header.Set("User-Agent", "golang httpClient") 106 | req.SetBasicAuth(c.authToken, "") 107 | return req, nil 108 | } 109 | 110 | func (c *dmsClient) do(req *http.Request, operation string) (*http.Response, error) { 111 | start := time.Now() 112 | defer func() { 113 | 
c.metricsCollector.ObserveSnitchCallDuration(time.Since(start).Seconds(), operation)
114 | }()
115 | resp, err := c.httpClient.Do(req)
116 | 
117 | // raise an error if unable to authenticate to DMS service
118 | // (check err first: resp is nil when the request itself failed)
118 | if err == nil && resp.StatusCode == 401 {
119 | err = fmt.Errorf("unauthorized error: please check the deadmanssnitch credentials")
120 | }
121 | 
122 | if err != nil {
123 | c.metricsCollector.ObserveSnitchCallError()
124 | return resp, fmt.Errorf("error calling the API endpoint: %v", err)
125 | }
126 | 
127 | return resp, nil
128 | }
129 | 
130 | // ListAll snitches
131 | func (c *dmsClient) ListAll() ([]Snitch, error) {
132 | req, err := c.newRequest("GET", "/v1/snitches", nil)
133 | if err != nil {
134 | return nil, err
135 | }
136 | 
137 | resp, err := c.do(req, "list_all")
138 | if err != nil {
139 | return nil, err
140 | }
141 | defer resp.Body.Close() // close the body so the connection isn't leaked
142 | var snitches []Snitch
143 | decodeErr := json.NewDecoder(resp.Body).Decode(&snitches)
144 | if decodeErr != nil {
145 | err = fmt.Errorf("error listing all snitches: %v", decodeErr)
146 | }
147 | 
148 | return snitches, err
149 | }
150 | 
151 | // List a single snitch
152 | func (c *dmsClient) List(snitchToken string) (Snitch, error) {
153 | var snitch Snitch
154 | 
155 | req, err := c.newRequest("GET", "/v1/snitches/"+snitchToken, nil)
156 | if err != nil {
157 | return snitch, err
158 | }
159 | 
160 | resp, err := c.do(req, "describe")
161 | if err != nil {
162 | return snitch, err
163 | }
164 | defer resp.Body.Close()
165 | 
166 | decodeErr := json.NewDecoder(resp.Body).Decode(&snitch)
167 | if decodeErr != nil {
168 | err = fmt.Errorf("error listing snitch: %v", decodeErr)
169 | }
170 | return snitch, err
171 | }
172 | 
173 | // Create a snitch
174 | func (c *dmsClient) Create(newSnitch Snitch) (Snitch, error) {
175 | var snitch Snitch
176 | req, err := c.newRequest("POST", "/v1/snitches", newSnitch)
177 | if err != nil {
178 | return snitch, err
179 | }
180 | resp, err := c.do(req, "create")
181 | if err != nil {
182 | return snitch, err
183 | }
184 | 
185 | defer resp.Body.Close()
186 | 
187 | decodeErr := json.NewDecoder(resp.Body).Decode(&snitch)
188 | if decodeErr != nil {
189 | err = fmt.Errorf("error creating snitch: %v", decodeErr)
190 | }
191 | return snitch, err
192 | }
193 | 
194 | // Delete a snitch
195 | func (c *dmsClient) Delete(snitchToken string) (bool, error) {
196 | req, err := c.newRequest("DELETE", "/v1/snitches/"+snitchToken, nil)
197 | if err != nil {
198 | return false, err
199 | }
200 | resp, err := c.do(req, "delete")
201 | if err != nil {
202 | return false, err
203 | }
204 | defer resp.Body.Close() // close the body so the connection isn't leaked
205 | if resp.StatusCode == 204 {
206 | return true, nil
207 | }
208 | 
209 | return false, nil
210 | }
211 | 
212 | // FindSnitchesByName This will search for snitches using a name.
This 213 | // could return multiple snitches, as the same name may be used multiple times 214 | func (c *dmsClient) FindSnitchesByName(snitchName string) ([]Snitch, error) { 215 | var foundSnitches []Snitch 216 | listedSnitches, err := c.ListAll() 217 | if err != nil { 218 | return foundSnitches, err 219 | } 220 | 221 | for _, snitch := range listedSnitches { 222 | if snitch.Name == snitchName { 223 | foundSnitches = append(foundSnitches, snitch) 224 | } 225 | } 226 | 227 | return foundSnitches, err 228 | } 229 | 230 | // Update the snitch 231 | func (c *dmsClient) Update(updateSnitch Snitch) (Snitch, error) { 232 | var snitch Snitch 233 | req, err := c.newRequest("PATCH", "/v1/snitches/"+updateSnitch.Token, updateSnitch) 234 | if err != nil { 235 | return snitch, err 236 | } 237 | resp, err := c.do(req, "update") 238 | if err != nil { 239 | return snitch, err 240 | } 241 | 242 | defer resp.Body.Close() 243 | err = json.NewDecoder(resp.Body).Decode(&snitch) 244 | 245 | return snitch, err 246 | } 247 | 248 | // Initialize the snitch with a basic GET call to its url 249 | func (c *dmsClient) CheckIn(s Snitch) error { 250 | var buf io.ReadWriter 251 | req, err := http.NewRequest("GET", s.CheckInURL, buf) 252 | if err != nil { 253 | return err 254 | } 255 | 256 | req.Header.Set("User-Agent", "golang httpClient") 257 | 258 | _, err = c.do(req, "check_in") 259 | if err != nil { 260 | return err 261 | } 262 | 263 | return nil 264 | } 265 | -------------------------------------------------------------------------------- /boilerplate/_lib/boilerplate-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | if [ "$BOILERPLATE_SET_X" ]; then 5 | set -x 6 | fi 7 | 8 | REPO_ROOT=$(git rev-parse --show-toplevel) 9 | source $REPO_ROOT/boilerplate/_lib/common.sh 10 | 11 | tmpd=$(mktemp -d) 12 | trap "rm -fr $tmpd" EXIT 13 | git_status=$tmpd/git_status 14 | bp_log=$tmpd/bp_git_log 15 | bp_clone=$tmpd/bp_git_clone 16 | convention_status=$tmpd/convention_status 17 | commit_message=$tmpd/commit_msg 18 | 19 | # Variables to keep track of what's happening in this commit. Empty 20 | # means we're not doing whatever it is. 21 | #### 22 | # - Bootstrapping: bringing boilerplate into the repo for the first 23 | # time. Nonempty if bootstrapping. 24 | bootstrap= 25 | #### 26 | # - If we were already bootstrapped, and boilerplate-update brought in a 27 | # newer boilerplate commit, we'll put "{from_hash}...{to_hash}" here. 28 | # This should be mutually exclusive with `bootstrap`. 29 | bp_commit_change= 30 | #### 31 | # - Changes in conventions. This is a file containing one line per 32 | # convention indicating what was done in this commit with respect to 33 | # that convention: "Subscribe", "Update", or "No change". (TODO: 34 | # "Unsubscribe".) The file is only empty if update.cfg is 35 | # (substantively) empty. 36 | convention_statuses=$tmpd/convention_statuses 37 | >$convention_statuses 38 | #### 39 | 40 | git status --porcelain > $git_status 41 | 42 | # Bootstrapping includes adding the boilerplate-update target to the 43 | # Makefile and adding boilerplate/update and boilerplate/update.cfg. We 44 | # won't bother with the former. Since the latter are new files in a new 45 | # directory, `git status` will just show the `boilerplate/` directory as 46 | # untracked. 47 | if grep -q '^?? boilerplate/$' $git_status; then 48 | bootstrap=true 49 | 50 | # This wasn't a bootstrap. 
We can detect it was an update if the 51 | # last-boilerplate-commit file was changed. 52 | elif grep -q '^ M boilerplate/_data/last-boilerplate-commit$' $git_status; then 53 | # Produce a string of the form {old_hash}...{new_hash} 54 | bp_commit_change=$(git diff boilerplate/_data/last-boilerplate-commit | tail -2 | paste -d/ -s - | sed 's/[+-]//g; s,/,...,') 55 | # Handy URL showing the commits and deltas 56 | bp_compare_url="https://github.com/openshift/boilerplate/compare/$bp_commit_change" 57 | # Generate the commit history for this range. This will go in the commit message. 58 | ( 59 | git clone "${BOILERPLATE_GIT_REPO}" "${bp_clone}" 60 | cd "${bp_clone}" 61 | # Matches promote.sh 62 | git log --no-merges --pretty=format:'commit: %H%nauthor: %an%n%s%n%n%b%n%n' $bp_commit_change > $bp_log 63 | ) 64 | 65 | fi 66 | 67 | # Okay, let's look for convention changes. 68 | # TODO: Handle unsubscribes (not yet handled by the main `update` either). 69 | while read convention junk; do 70 | # TODO: These first few conditions, scrubbing the config file, are 71 | # identical to what's in `update`. It would be lovely to library-ize 72 | # them. However, `update` needs to remain self-contained since it's 73 | # part of the bootstrap process. 74 | 75 | # Skip comment lines (which can have leading whitespace) 76 | if [[ "$convention" == '#'* ]]; then 77 | continue 78 | fi 79 | # Skip blank or whitespace-only lines 80 | if [[ "$convention" == "" ]]; then 81 | continue 82 | fi 83 | # Lines like 84 | # valid/path other_junk 85 | # are not acceptable, unless `other_junk` is a comment 86 | if [[ "$junk" != "" ]] && [[ "$junk" != '#'* ]]; then 87 | echo "Invalid config! Only one convention is allowed per line. Found '$junk'. Ignoring." 88 | # `update` bails for this. We're being a bit more forgiving. 89 | continue 90 | fi 91 | 92 | dir_path="boilerplate/${convention}" 93 | # Make sure the directory exists 94 | if ! [[ -d "$dir_path" ]]; then 95 | echo "Invalid convention directory: '$convention'." 96 | echo "(Could be because we don't handle unsubscribing yet.)" 97 | echo "Ignoring." 98 | # `update` bails for this. We're being a bit more forgiving. 99 | continue 100 | fi 101 | 102 | # Okay, we have a legit convention. Let's see if the current checkout 103 | # touches it 104 | # (Note that we're reusing the same temp file on each iteration.) 105 | git status --porcelain $dir_path > $convention_status 106 | if ! [[ -s $convention_status ]]; then 107 | # No deltas here. 108 | echo "- $convention: No change" >> $convention_statuses 109 | 110 | elif grep -q -v '^??' $convention_status; then 111 | # If there's anything *other than* untracked, this was an update 112 | echo "- $convention: Update" >> $convention_statuses 113 | 114 | else 115 | # If we get here, everything is '^??' (untracked), meaning this is a 116 | # new subscription. (Or, I suppose, the convention was previously 117 | # empty? We'll call it a new subscription anyway.) 118 | echo "- $convention: Subscribe" >> $convention_statuses 119 | fi 120 | 121 | done < boilerplate/update.cfg 122 | 123 | # Let's make sure *something* boilerplate-related is happening here. 124 | if [[ -z "$bootstrap" ]] && [[ -z "$bp_commit_change" ]] && ! grep -v -q "No change" $convention_statuses; then 125 | err "No boilerplate-related activity found in the current checkout!" 126 | fi 127 | 128 | # Okay, we're ready to do this. 129 | # Generate the commit title and branch name indicating the *main* action 130 | # we're taking. 
This is 'bootstrap' or 'update'; or if we're doing 131 | # neither of those things and only changing config, 'subscribe'. 132 | # => Commit titles will be of one of the following forms: 133 | # "Boilerplate: Bootstrap at {hash}" 134 | # "Boilerplate: Update to {hash}" 135 | # "Boilerplate: Subscribe at {hash}" 136 | # => Branch names will be of the form: 137 | # boilerplate-{bootstrap|update|subscribe}-{N}-{hash} 138 | # where {N} is the number of configured conventions (omitted if zero) 139 | title="Boilerplate:" 140 | branch=boilerplate 141 | if [[ -n "$bootstrap" ]]; then 142 | title="$title Bootstrap at" 143 | branch="$branch-bootstrap" 144 | elif [[ -n "$bp_commit_change" ]]; then 145 | title="$title Update to" 146 | branch="$branch-update" 147 | else 148 | title="$title Subscribe at" 149 | branch="$branch-subscribe" 150 | fi 151 | cur_commit=$(cat boilerplate/_data/last-boilerplate-commit) 152 | title="$title $cur_commit" 153 | echo "$title 154 | " > $commit_message 155 | 156 | if [[ -n "$bootstrap" ]]; then 157 | echo "https://github.com/openshift/boilerplate/commit/$cur_commit 158 | ---" >> $commit_message 159 | fi 160 | 161 | echo "Conventions:" >> $commit_message 162 | if [[ -s $convention_statuses ]]; then 163 | cat $convention_statuses >> $commit_message 164 | # Add the number of conventions to the branch name 165 | branch="$branch-"$(wc -l $convention_statuses | sed 's/ .*//') 166 | else 167 | echo " None." >> $commit_message 168 | fi 169 | 170 | branch="$branch-$cur_commit" 171 | 172 | if [[ -n "$bp_commit_change" ]]; then 173 | 174 | echo "--- 175 | $bp_compare_url 176 | " >> $commit_message 177 | cat $bp_log >> $commit_message 178 | 179 | fi 180 | 181 | # TODO: Handle branch name conflict. At the moment, this should really only be 182 | # possible if unsubscribing and subscribing the same number of conventions. 183 | # Since we don't handle unsubscribing (properly), we'll take our chances that 184 | # it "can't" happen for now. 185 | git checkout -b $branch 186 | # We can get away with -A because `update` forces a clean checkout. 187 | git add -A 188 | git commit -F $commit_message 189 | echo "Ready to push branch $branch" 190 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # deadmanssnitch-operator 2 | 3 | Operator to manage deadmanssnitch configs for Openshift Dedicated 4 | 5 | - [deadmanssnitch-operator](#deadmanssnitch-operator) 6 | - [Overview](#overview) 7 | - [Metrics](#metrics) 8 | - [Alerts](#alerts) 9 | - [Usage](#usage) 10 | 11 | ## Overview 12 | 13 | The operator runs on hive. It has a single controller. It: 14 | 15 | - Requires a master Secret to talk to the Dead Man's Snitch API. 16 | This secret is expected to be named `deadmanssnitch-api-key` and live in the `deadmanssnitch-operator` namespace. 17 | - Pays attention to [ClusterDeployments](https://github.com/openshift/hive/blob/master/config/crds/hive.openshift.io_clusterdeployments.yaml) that are: 18 | - Installed (`spec.installed=true`) 19 | - Managed (label `api.openshift.com/managed="true"`) 20 | - For each such ClusterDeployment: 21 | - Adds a finalizer to the ClusterDeployment to ensure we get a chance to clean up when it is deleted. 22 | - Creates a Snitch 23 | - Creates a Secret in the ClusterDeployment's namespace named `{clusterdeploymentname}-dms-secret`. 24 | The Secret contains the Snitch URL. 
25 | - Creates a SyncSet in the ClusterDeployment's namespace named `{clusterdeploymentname}-dms`.
26 | The SyncSet creates a SecretMapping that makes the above Secret appear inside the cluster as `dms-secret` in the `openshift-monitoring` namespace.
27 | 
28 | ## Metrics
29 | 
30 | metricDeadMansSnitchHeartbeat: Every 5 minutes, makes a request to the Dead Man's Snitch API using the API key and updates the gauge to 1 when the response code is between 200 and 299.
31 | 
32 | ## Alerts
33 | 
34 | - DeadMansSnitchAPIUnavailable - Unable to communicate with the Dead Man's Snitch API for 15 minutes.
35 | 
36 | ## Usage
37 | 
38 | - Create an account on <https://deadmanssnitch.com/>
39 | - Choose a plan that allows enhanced snitch intervals (Private Eye or above)
40 | - Create an API key
41 | - Create the following secret, which is required for deadmanssnitch-operator to create snitches (both values base64-encoded):
42 | 
43 | ```yaml
44 | apiVersion: v1
45 | kind: Secret
46 | type: Opaque
47 | metadata:
48 |   name: deadmanssnitch-api-key
49 |   namespace: deadmanssnitch-operator
50 | data:
51 |   tags: <base64-encoded tags>
52 |   deadmanssnitch-api-key: <base64-encoded API key>
53 | ```
54 | 
55 | - Build a docker image and replace the `REPLACE_IMAGE` field in [operator.yaml](deploy/operator.yaml) with that image
56 |   - you can do that using `oc create -f https://github.com/openshift/deadmanssnitch-operator/raw/master/deploy/operator.yaml --dry-run=client -oyaml | oc set image --local -f - --dry-run=client -oyaml *=REPLACE_IMAGE`
57 | - Deploy using `oc apply -f deploy/`
58 | 
59 | ## Development
60 | 
61 | 
62 | <details><summary>how to develop this locally</summary>

64 | 
65 | ### Set up local OpenShift cluster
66 | 
67 | Methods include:
68 | 
69 | - [MiniShift](https://github.com/minishift/minishift)
70 | - [Code Ready Containers](https://developers.redhat.com/products/codeready-containers/overview)
71 | - [Integration OpenShift Cluster Manager](https://qaprodauth.cloud.redhat.com/openshift/?env=integration)
72 | 
73 | ### Deploy dependencies
74 | 
75 | [Hive](https://github.com/openshift/hive/) CRDs need to be installed on the cluster.
76 | 
77 | Clone the [hive repo](https://github.com/openshift/hive/) and run:
78 | 
79 | ```terminal
80 | git clone https://github.com/openshift/hive.git
81 | oc apply -f hive/config/crds
82 | ```
83 | 
84 | Install the `DeadMansSnitchIntegration` CRD, create the operator namespace and other operator dependencies:
85 | 
86 | ```terminal
87 | oc apply -f deploy/crds/deadmanssnitch.managed.openshift.io_deadmanssnitchintegrations.yaml
88 | oc new-project deadmanssnitch-operator
89 | oc apply -f deploy/role.yaml
90 | oc apply -f deploy/service_account.yaml
91 | oc apply -f deploy/role_binding.yaml
92 | ```
93 | 
94 | Create a secret which will contain the DeadMansSnitch API key and Hive cluster tag.
95 | 
96 | You will require an API key from an account signed up to a DeadMansSnitch plan that allows for enhanced snitch intervals (the "Private Eye" plan). You can alternatively test the `deadmanssnitch-operator` by signing up to the free-tier DeadMansSnitch plan (limited to 1 snitch), but doing so will require you to customize the snitch interval from `15_minute` to `hourly`. This can be done in the controller code under [controllers/deadmanssnitchintegration](controllers/deadmanssnitchintegration).
97 | 
98 | Adjust the example below and apply the file with `oc apply -f <filename>`. Note that the values for `tags` and `deadmanssnitch-api-key` need to be base64 encoded. This can be performed using `echo -n <value> | base64`.
99 | 
100 | ```yaml
101 | apiVersion: v1
102 | kind: Secret
103 | type: Opaque
104 | metadata:
105 |   name: deadmanssnitch-api-key
106 |   namespace: deadmanssnitch-operator
107 | data:
108 |   tags: <base64-encoded tags>
109 |   deadmanssnitch-api-key: <base64-encoded API key>
110 | ```
111 | 
112 | ### Define a DeadMansSnitchIntegration
113 | 
114 | Create a `DeadMansSnitchIntegration` CR which will be used to identify clusters to apply DMS to.
115 | 
116 | The example below will target `clusterdeployment`s that have an `api.openshift.com/test` label set to `"true"`. Apply it using `oc apply -f <filename>`.
117 | 
118 | ```yaml
119 | apiVersion: deadmanssnitch.managed.openshift.io/v1alpha1
120 | kind: DeadmansSnitchIntegration
121 | metadata:
122 |   finalizers:
123 |   - dms.managed.openshift.io/deadmanssnitch-osd
124 |   name: test-dmsi
125 |   namespace: deadmanssnitch-operator
126 | spec:
127 |   clusterDeploymentSelector:
128 |     matchExpressions:
129 |     - key: api.openshift.com/test
130 |       operator: In
131 |       values:
132 |       - "true"
133 |   dmsAPIKeySecretRef:
134 |     name: deadmanssnitch-api-key
135 |     namespace: deadmanssnitch-operator
136 |   snitchNamePostFix: "test"
137 |   tags:
138 |   - test
139 |   targetSecretRef:
140 |     name: dms-secret-test
141 |     namespace: openshift-monitoring
142 | ```
143 | 
144 | ### Run the operator
145 | 
146 | ```terminal
147 | export OPERATOR_NAME=deadmanssnitch-operator
148 | go run main.go
149 | ```
150 | 
151 | ### Create a ClusterDeployment
152 | 
153 | You can create a dummy ClusterDeployment by copying a real one from an active hive:
154 | 
155 | ```terminal
156 | real-hive$ oc get cd <cluster-name> -n <namespace> -o yaml > /tmp/fake-clusterdeployment.yaml
157 | 
158 | ...
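# (The "..." above stands for elided clean-up of the copied YAML --
# hypothetically stripping server-set fields such as status,
# metadata.resourceVersion and metadata.uid before applying it; the exact
# fields depend on your source cluster.)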
159 | 160 | $ oc create namespace fake-cluster-namespace 161 | $ oc apply -f /tmp/fake-clusterdeployment.yaml 162 | ``` 163 | 164 | `deadmanssnitch-operator` doesn't start reconciling a cluster until the `clusterdeployment`'s `spec.installed` field is set to `true`. If it isn't already, edit the `clusterdeployment` and set `spec.installed` to `true`: 165 | 166 | ```terminal 167 | oc edit clusterdeployment fake-cluster -n fake-cluster-namespace 168 | ``` 169 | 170 | Ensure that the ClusterDeployment is labelled with the label from your `DMSI`'s `clusterDeploymentSelector` clause. 171 | 172 | Using the example from earlier: 173 | 174 | ```terminal 175 | oc label clusterdeployment fake-cluster -n fake-cluster-namespace api.openshift.com/test=true 176 | ``` 177 | 178 | ### Delete ClusterDeployment 179 | 180 | To trigger `deadmanssnitch-operator` to remove the snitch in DeadMansSnitch, you can either delete the `clusterdeployment` or remove the `clusterDeploymentSelector` label: 181 | 182 | ```terminal 183 | oc delete clusterdeployment fake-cluster -n fake-cluster-namespace 184 | ``` 185 | 186 | If deleting the `clusterdeployment`, you may need to remove dangling finalizers from the `clusterdeployment` object, e.g. via `oc edit`: 187 | 188 | ```terminal 189 | oc edit clusterdeployment fake-cluster -n fake-cluster-namespace 190 | ``` 191 | 192 |
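Alternatively, a merge patch can clear the finalizers in one step. A sketch (note this removes *all* finalizers on the object, which is acceptable for a throwaway test cluster):

```terminal
oc patch clusterdeployment fake-cluster -n fake-cluster-namespace \
  --type=merge -p '{"metadata":{"finalizers":null}}'
```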
193 |
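To confirm the operator reconciled a ClusterDeployment end to end, you can look for the SyncSet it creates (named `{clusterdeploymentname}-dms`, as described at the top of this README) and list the integration CRs. A sketch using the fake cluster from above:

```terminal
oc get syncset -n fake-cluster-namespace
oc get deadmanssnitchintegration -n deadmanssnitch-operator   # short name: dmsi
```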
194 | -------------------------------------------------------------------------------- /deploy/crds/deadmanssnitch.managed.openshift.io_deadmanssnitchintegrations.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.16.4 7 | name: deadmanssnitchintegrations.deadmanssnitch.managed.openshift.io 8 | spec: 9 | group: deadmanssnitch.managed.openshift.io 10 | names: 11 | kind: DeadmansSnitchIntegration 12 | listKind: DeadmansSnitchIntegrationList 13 | plural: deadmanssnitchintegrations 14 | shortNames: 15 | - dmsi 16 | singular: deadmanssnitchintegration 17 | scope: Namespaced 18 | versions: 19 | - name: v1alpha1 20 | schema: 21 | openAPIV3Schema: 22 | description: DeadmansSnitchIntegration is the Schema for the deadmanssnitchintegrations 23 | API 24 | properties: 25 | apiVersion: 26 | description: |- 27 | APIVersion defines the versioned schema of this representation of an object. 28 | Servers should convert recognized schemas to the latest internal value, and 29 | may reject unrecognized values. 30 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 31 | type: string 32 | kind: 33 | description: |- 34 | Kind is a string value representing the REST resource this object represents. 35 | Servers may infer this from the endpoint the client submits requests to. 36 | Cannot be updated. 37 | In CamelCase. 38 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 39 | type: string 40 | metadata: 41 | type: object 42 | spec: 43 | description: DeadmansSnitchIntegrationSpec defines the desired state of 44 | DeadmansSnitchIntegration 45 | properties: 46 | clusterDeploymentAnnotationsToSkip: 47 | description: a list of annotations the operator to skip 48 | items: 49 | description: |- 50 | ClusterDeploymentAnnotationsToSkip contains a list of annotation keys and values 51 | The operator will skip the cluster deployment if it has the same annotations set 52 | properties: 53 | name: 54 | type: string 55 | value: 56 | type: string 57 | required: 58 | - name 59 | - value 60 | type: object 61 | type: array 62 | clusterDeploymentSelector: 63 | description: a label selector used to find which clusterdeployment 64 | CRs receive a DMS integration based on this configuration 65 | properties: 66 | matchExpressions: 67 | description: matchExpressions is a list of label selector requirements. 68 | The requirements are ANDed. 69 | items: 70 | description: |- 71 | A label selector requirement is a selector that contains values, a key, and an operator that 72 | relates the key and values. 73 | properties: 74 | key: 75 | description: key is the label key that the selector applies 76 | to. 77 | type: string 78 | operator: 79 | description: |- 80 | operator represents a key's relationship to a set of values. 81 | Valid operators are In, NotIn, Exists and DoesNotExist. 82 | type: string 83 | values: 84 | description: |- 85 | values is an array of string values. If the operator is In or NotIn, 86 | the values array must be non-empty. If the operator is Exists or DoesNotExist, 87 | the values array must be empty. This array is replaced during a strategic 88 | merge patch. 
89 | items: 90 | type: string 91 | type: array 92 | required: 93 | - key 94 | - operator 95 | type: object 96 | type: array 97 | matchLabels: 98 | additionalProperties: 99 | type: string 100 | description: |- 101 | matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels 102 | map is equivalent to an element of matchExpressions, whose key field is "key", the 103 | operator is "In", and the values array contains only "value". The requirements are ANDed. 104 | type: object 105 | type: object 106 | x-kubernetes-map-type: atomic 107 | dmsAPIKeySecretRef: 108 | description: reference to the secret containing deadmanssnitch-api-key 109 | properties: 110 | name: 111 | description: name is unique within a namespace to reference a 112 | secret resource. 113 | type: string 114 | namespace: 115 | description: namespace defines the space within which the secret 116 | name must be unique. 117 | type: string 118 | type: object 119 | x-kubernetes-map-type: atomic 120 | snitchNamePostFix: 121 | description: The postfix to append to any snitches managed by this 122 | integration. I.e. "osd" or "rhmi" 123 | type: string 124 | tags: 125 | description: Array of strings that are applied to the service created 126 | in DMS 127 | items: 128 | type: string 129 | type: array 130 | targetSecretRef: 131 | description: name and namespace in the target cluster where the secret 132 | is synced 133 | properties: 134 | name: 135 | description: name is unique within a namespace to reference a 136 | secret resource. 137 | type: string 138 | namespace: 139 | description: namespace defines the space within which the secret 140 | name must be unique. 141 | type: string 142 | type: object 143 | x-kubernetes-map-type: atomic 144 | required: 145 | - clusterDeploymentSelector 146 | - dmsAPIKeySecretRef 147 | - targetSecretRef 148 | type: object 149 | status: 150 | description: DeadmansSnitchIntegrationStatus defines the observed state 151 | of DeadmansSnitchIntegration 152 | type: object 153 | required: 154 | - spec 155 | type: object 156 | served: true 157 | storage: true 158 | subresources: 159 | status: {} 160 | -------------------------------------------------------------------------------- /controllers/deadmanssnitchintegration/event_handlers.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package deadmanssnitchintegration 18 | 19 | import ( 20 | "context" 21 | 22 | deadmanssnitchv1alpha1 "github.com/openshift/deadmanssnitch-operator/api/v1alpha1" 23 | hivev1 "github.com/openshift/hive/apis/hive/v1" 24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | "k8s.io/apimachinery/pkg/labels" 26 | "k8s.io/apimachinery/pkg/runtime" 27 | "k8s.io/apimachinery/pkg/runtime/schema" 28 | "k8s.io/apimachinery/pkg/types" 29 | "k8s.io/client-go/util/workqueue" 30 | "sigs.k8s.io/controller-runtime/pkg/client" 31 | "sigs.k8s.io/controller-runtime/pkg/event" 32 | "sigs.k8s.io/controller-runtime/pkg/handler" 33 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 34 | ) 35 | 36 | var _ handler.EventHandler = &enqueueRequestForClusterDeployment{} 37 | 38 | // enqueueRequestForClusterDeployment implements the handler.EventHandler interface. 39 | // Heavily inspired by https://github.com/kubernetes-sigs/controller-runtime/blob/v0.12.1/pkg/handler/enqueue_mapped.go 40 | type enqueueRequestForClusterDeployment struct { 41 | Client client.Client 42 | } 43 | 44 | func (e *enqueueRequestForClusterDeployment) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { 45 | reqs := map[reconcile.Request]struct{}{} 46 | e.mapAndEnqueue(q, evt.Object, reqs) 47 | } 48 | 49 | func (e *enqueueRequestForClusterDeployment) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { 50 | reqs := map[reconcile.Request]struct{}{} 51 | e.mapAndEnqueue(q, evt.ObjectOld, reqs) 52 | e.mapAndEnqueue(q, evt.ObjectNew, reqs) 53 | } 54 | 55 | func (e *enqueueRequestForClusterDeployment) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { 56 | reqs := map[reconcile.Request]struct{}{} 57 | e.mapAndEnqueue(q, evt.Object, reqs) 58 | } 59 | 60 | func (e *enqueueRequestForClusterDeployment) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { 61 | reqs := map[reconcile.Request]struct{}{} 62 | e.mapAndEnqueue(q, evt.Object, reqs) 63 | } 64 | 65 | // toRequests receives a ClusterDeployment object that has fired an event and checks whether it can find an associated 66 | // DeadmansSnitchIntegration object that has a matching label selector; if so, it creates a request for the reconciler to 67 | // take a look at that DeadmansSnitchIntegration object.
68 | func (e *enqueueRequestForClusterDeployment) toRequests(obj client.Object) []reconcile.Request { 69 | reqs := []reconcile.Request{} 70 | dmiList := &deadmanssnitchv1alpha1.DeadmansSnitchIntegrationList{} 71 | if err := e.Client.List(context.TODO(), dmiList, &client.ListOptions{}); err != nil { 72 | return reqs 73 | } 74 | 75 | for _, dmi := range dmiList.Items { 76 | dmi := dmi 77 | selector, err := metav1.LabelSelectorAsSelector(&dmi.Spec.ClusterDeploymentSelector) 78 | if err != nil { 79 | continue 80 | } 81 | if selector.Matches(labels.Set(obj.GetLabels())) { 82 | reqs = append(reqs, reconcile.Request{ 83 | NamespacedName: types.NamespacedName{ 84 | Name: dmi.Name, 85 | Namespace: dmi.Namespace, 86 | }, 87 | }) 88 | } 89 | } 90 | return reqs 91 | } 92 | 93 | func (e *enqueueRequestForClusterDeployment) mapAndEnqueue(q workqueue.RateLimitingInterface, obj client.Object, reqs map[reconcile.Request]struct{}) { 94 | for _, req := range e.toRequests(obj) { 95 | _, ok := reqs[req] 96 | if !ok { 97 | q.Add(req) 98 | // Used for de-duping requests 99 | reqs[req] = struct{}{} 100 | } 101 | } 102 | } 103 | 104 | var _ handler.EventHandler = &enqueueRequestForClusterDeploymentOwner{} 105 | 106 | // enqueueRequestForClusterDeploymentOwner implements the handler.EventHandler interface. 107 | // Heavily inspired by https://github.com/kubernetes-sigs/controller-runtime/blob/v0.12.1/pkg/handler/enqueue_mapped.go 108 | type enqueueRequestForClusterDeploymentOwner struct { 109 | Client client.Client 110 | Scheme *runtime.Scheme 111 | groupKind schema.GroupKind 112 | } 113 | 114 | func (e *enqueueRequestForClusterDeploymentOwner) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { 115 | e.mapAndEnqueue(q, evt.Object) 116 | } 117 | 118 | func (e *enqueueRequestForClusterDeploymentOwner) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { 119 | e.mapAndEnqueue(q, evt.ObjectOld) 120 | e.mapAndEnqueue(q, evt.ObjectNew) 121 | } 122 | 123 | func (e *enqueueRequestForClusterDeploymentOwner) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) { 124 | e.mapAndEnqueue(q, evt.Object) 125 | } 126 | 127 | func (e *enqueueRequestForClusterDeploymentOwner) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { 128 | e.mapAndEnqueue(q, evt.Object) 129 | } 130 | 131 | func (e *enqueueRequestForClusterDeploymentOwner) getClusterDeploymentGroupKind() { 132 | e.groupKind = schema.GroupKind{ 133 | Group: hivev1.HiveAPIGroup, 134 | Kind: "ClusterDeployment", 135 | } 136 | } 137 | 138 | // getAssociatedDeadmansSnitchIntegrations receives objects and checks if they're owned by a ClusterDeployment. If so, it then 139 | // collects associated DeadmansSnitchIntegration CRs and creates requests for the reconciler to consider. 
140 | func (e *enqueueRequestForClusterDeploymentOwner) getAssociatedDeadmansSnitchIntegrations(obj metav1.Object) map[reconcile.Request]struct{} { 141 | e.getClusterDeploymentGroupKind() 142 | 143 | cds := []*hivev1.ClusterDeployment{} 144 | for _, ref := range obj.GetOwnerReferences() { 145 | refGV, err := schema.ParseGroupVersion(ref.APIVersion) 146 | if err != nil { 147 | log.Error(err, "could not parse OwnerReference APIVersion", "api version", ref.APIVersion) 148 | return map[reconcile.Request]struct{}{} 149 | } 150 | 151 | if ref.Kind == e.groupKind.Kind && refGV.Group == e.groupKind.Group { 152 | cd := &hivev1.ClusterDeployment{} 153 | if err := e.Client.Get(context.TODO(), client.ObjectKey{Namespace: obj.GetNamespace(), Name: ref.Name}, cd); err != nil { 154 | log.Error(err, "could not get ClusterDeployment", "namespace", obj.GetNamespace(), "name", ref.Name) 155 | continue 156 | } 157 | cds = append(cds, cd) 158 | } 159 | } 160 | 161 | if len(cds) == 0 { 162 | return map[reconcile.Request]struct{}{} 163 | } 164 | 165 | reqs := map[reconcile.Request]struct{}{} 166 | dmiList := &deadmanssnitchv1alpha1.DeadmansSnitchIntegrationList{} 167 | if err := e.Client.List(context.TODO(), dmiList, &client.ListOptions{}); err != nil { 168 | log.Error(err, "could not list DeadmansSnitchIntegrations") 169 | return reqs 170 | } 171 | 172 | for _, dmi := range dmiList.Items { 173 | dmi := dmi 174 | selector, err := metav1.LabelSelectorAsSelector(&dmi.Spec.ClusterDeploymentSelector) 175 | if err != nil { 176 | log.Error(err, "could not build ClusterDeployment label selector") 177 | continue 178 | } 179 | for _, cd := range cds { 180 | if selector.Matches(labels.Set(cd.GetLabels())) { 181 | request := reconcile.Request{ 182 | NamespacedName: types.NamespacedName{ 183 | Name: dmi.Name, 184 | Namespace: dmi.Namespace, 185 | }, 186 | } 187 | reqs[request] = struct{}{} 188 | } 189 | } 190 | } 191 | 192 | return reqs 193 | } 194 | 195 | func (e *enqueueRequestForClusterDeploymentOwner) mapAndEnqueue(q workqueue.RateLimitingInterface, obj client.Object) { 196 | for req := range e.getAssociatedDeadmansSnitchIntegrations(obj) { 197 | q.Add(req) 198 | } 199 | } 200 | -------------------------------------------------------------------------------- /boilerplate/_lib/common.sh: -------------------------------------------------------------------------------- 1 | err() { 2 | echo "==ERROR== $@" >&2 3 | exit 1 4 | } 5 | 6 | banner() { 7 | echo 8 | echo "==============================" 9 | echo "$@" 10 | echo "==============================" 11 | } 12 | 13 | ## osdk_version BINARY 14 | # 15 | # Print the version of the specified operator-sdk BINARY 16 | osdk_version() { 17 | local osdk=$1 18 | # `operator-sdk version` output looks like 19 | # operator-sdk version: v0.8.2, commit: 28bd2b0d4fd25aa68e15d928ae09d3c18c3b51da 20 | # or 21 | # operator-sdk version: "v0.16.0", commit: "55f1446c5f472e7d8e308dcdf36d0d7fc44fc4fd", go version: "go1.13.8 linux/amd64" 22 | # Peel out the version number, accounting for the optional quotes. 
23 | $osdk version | ${SED?} 's/operator-sdk version: "*\([^,"]*\)"*,.*/\1/' 24 | } 25 | 26 | ## opm_version BINARY 27 | # 28 | # Print the version of the specified opm BINARY 29 | opm_version() { 30 | local opm=$1 31 | # `opm version` output looks like: 32 | # Version: version.Version{OpmVersion:"v1.15.2", GitCommit:"fded0bf", BuildDate:"2020-11-18T14:21:24Z", GoOs:"darwin", GoArch:"amd64"} 33 | $opm version | ${SED?} 's/.*OpmVersion:"//;s/".*//' 34 | } 35 | 36 | ## grpcurl_version BINARY 37 | # 38 | # Print the version of the specified grpcurl BINARY 39 | grpcurl_version() { 40 | local grpcurl=$1 41 | # `grpcurl -version` output looks like: grpcurl 1.7.0 42 | $grpcurl -version 2>&1 | cut -d " " -f 2 43 | } 44 | 45 | ## repo_name REPODIR 46 | # 47 | # Print the qualified org/name of the current repository, e.g. 48 | # "openshift/wizbang-foo-operator". This relies on git remotes being set 49 | # reasonably. 50 | repo_name() { 51 | # Just strip off the first component of the import-ish path 52 | repo_import $1 | ${SED?} 's,^[^/]*/,,' 53 | } 54 | 55 | ## repo_import REPODIR 56 | # 57 | # Print the go import-ish path to the current repository, e.g. 58 | # "github.com/openshift/wizbang-foo-operator". This relies on git 59 | # remotes being set reasonably. 60 | repo_import() { 61 | # Account for remotes which are 62 | # - upstream or origin 63 | # - ssh ("git@host.com:org/name.git") or https ("https://host.com/org/name.git") 64 | (git -C $1 config --get remote.upstream.url || git -C $1 config --get remote.origin.url) | ${SED?} 's,git@\([^:]*\):,\1/,; s,https://,,; s/\.git$//' 65 | } 66 | 67 | ## current_branch REPO 68 | # 69 | # Outputs the name of the current branch in the REPO directory 70 | current_branch() { 71 | ( 72 | cd $1 73 | git rev-parse --abbrev-ref HEAD 74 | ) 75 | } 76 | 77 | ## image_exists_in_repo IMAGE_URI 78 | # 79 | # Checks whether IMAGE_URI -- e.g. quay.io/app-sre/osd-metrics-exporter:abcd123 80 | # -- exists in the remote repository. 81 | # If so, returns success. 82 | # If the image does not exist, but the query was otherwise successful, returns 83 | # failure. 84 | # If the query fails for any reason, prints an error and *exits* nonzero. 85 | image_exists_in_repo() { 86 | local image_uri=$1 87 | local output 88 | local rc 89 | 90 | local skopeo_stderr=$(mktemp) 91 | 92 | if ! command -v skopeo &>/dev/null; then 93 | echo "Failed to find the skopeo binary. If you are on Mac: brew install skopeo." >&2 94 | exit 1 95 | fi 96 | 97 | output=$(skopeo inspect docker://${image_uri} 2>$skopeo_stderr) 98 | rc=$? 99 | # So we can delete the temp file right away... 100 | stderr=$(cat $skopeo_stderr) 101 | rm -f $skopeo_stderr 102 | if [[ $rc -eq 0 ]]; then 103 | # The image exists. Sanity check the output. 104 | local digest=$(echo $output | jq -r .Digest) 105 | if [[ -z "$digest" ]]; then 106 | echo "Unexpected error: skopeo inspect succeeded, but output contained no .Digest" 107 | echo "Here's the output:" 108 | echo "$output" 109 | echo "...and stderr:" 110 | echo "$stderr" 111 | exit 1 112 | fi 113 | echo "Image ${image_uri} exists with digest $digest." 114 | return 0 115 | elif [[ "$output" == *"manifest unknown"* || "$stderr" == *"manifest unknown"* ]]; then 116 | # We were able to talk to the repository, but the tag doesn't exist. 117 | # This is the normal "green field" case. 118 | echo "Image ${image_uri} does not exist in the repository."
119 | return 1 120 | elif [[ "$output" == *"was deleted or has expired"* || "$stderr" == *"was deleted or has expired"* ]]; then 121 | # This should be rare, but accounts for cases where we had to 122 | # manually delete an image. 123 | echo "Image ${image_uri} was deleted from the repository." 124 | echo "Proceeding as if it never existed." 125 | return 1 126 | else 127 | # Any other error. For example: 128 | # - "unauthorized: access to the requested resource is not 129 | # authorized". This happens not just on auth errors, but if we 130 | # reference a repository that doesn't exist. 131 | # - "no such host". 132 | # - Network or other infrastructure failures. 133 | # In all these cases, we want to bail, because we don't know whether 134 | # the image exists (and we'd likely fail to push it anyway). 135 | echo "Error querying the repository for ${image_uri}:" 136 | echo "stdout: $output" 137 | echo "stderr: $stderr" 138 | exit 1 139 | fi 140 | } 141 | 142 | if [ "$BOILERPLATE_SET_X" ]; then 143 | set -x 144 | fi 145 | 146 | # Only used for error messages 147 | _lib=${BASH_SOURCE##*/} 148 | 149 | # When this lib is sourced (which is what it's designed for), $0 is the 150 | # script that did the sourcing. 151 | SOURCER=$(realpath $0) 152 | [[ -n "$SOURCER" ]] || err "$_lib couldn't discover where it was sourced from" 153 | 154 | HERE=${SOURCER%/*} 155 | [[ -n "$HERE" ]] || err "$_lib failed to discover the dirname of sourcer at $SOURCER" 156 | 157 | REPO_ROOT=$(git rev-parse --show-toplevel) 158 | [[ -n "$REPO_ROOT" ]] || err "$_lib couldn't discover the repo root" 159 | 160 | CONVENTION_ROOT=$REPO_ROOT/boilerplate 161 | [[ -d "$CONVENTION_ROOT" ]] || err "$CONVENTION_ROOT: not a directory" 162 | 163 | # Were we sourced from within a convention? 164 | if [[ "$HERE" == "$CONVENTION_ROOT/"* ]]; then 165 | # Okay, figure out the name of the convention 166 | CONVENTION_NAME=${HERE#$CONVENTION_ROOT/} 167 | # If we got here, we really expected to be able to identify the 168 | # convention name. 169 | [[ -n "$CONVENTION_NAME" ]] || err "$_lib couldn't discover the name of the sourcing convention" 170 | fi 171 | 172 | # Set SED variable 173 | if LANG=C sed --help 2>&1 | grep -q GNU; then 174 | SED="sed" 175 | elif command -v gsed &>/dev/null; then 176 | SED="gsed" 177 | else 178 | echo "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2 179 | exit 1 180 | fi 181 | 182 | if [ -z "$BOILERPLATE_GIT_REPO" ]; then 183 | export BOILERPLATE_GIT_REPO=https://github.com/openshift/boilerplate.git 184 | fi 185 | 186 | # Base image repo url 187 | IMAGE_REPO=quay.io/redhat-services-prod/openshift 188 | # The namespace of the ImageStream by which prow will import the image. 189 | IMAGE_NAMESPACE=openshift 190 | IMAGE_NAME=boilerplate 191 | # LATEST_IMAGE_TAG may be set manually or by `update`, in which case 192 | # that's the value we want to use. 193 | if [[ -z "$LATEST_IMAGE_TAG" ]]; then 194 | # (Non-ancient) consumers will have the tag in this file. 195 | if [[ -f ${CONVENTION_ROOT}/_data/backing-image-tag ]]; then 196 | LATEST_IMAGE_TAG=$(cat ${CONVENTION_ROOT}/_data/backing-image-tag) 197 | 198 | # In boilerplate itself, we can discover the latest from git. 199 | elif [[ $(repo_name .)
== openshift/boilerplate ]]; then 200 | LATEST_IMAGE_TAG=$(git describe --tags --abbrev=0 --match image-v*) 201 | fi 202 | fi 203 | # The public image location 204 | IMAGE_PULL_PATH=${IMAGE_PULL_PATH:-$IMAGE_REPO/$IMAGE_NAME:$LATEST_IMAGE_TAG} 205 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-operator/README.md: -------------------------------------------------------------------------------- 1 | # Conventions for OSD operators written in Go 2 | 3 | - [Conventions for OSD operators written in Go](#conventions-for-osd-operators-written-in-go) 4 | - [`make` targets and functions.](#make-targets-and-functions) 5 | - [Prow](#prow) 6 | - [Local Testing](#local-testing) 7 | - [app-sre](#app-sre) 8 | - [Code coverage](#code-coverage) 9 | - [Linting and other static analysis with `golangci-lint`](#linting-and-other-static-analysis-with-golangci-lint) 10 | - [Checks on generated code](#checks-on-generated-code) 11 | - [FIPS](#fips-federal-information-processing-standards) 12 | - [Additional deployment support](#additional-deployment-support) 13 | - [OLM SkipRange](#olm-skiprange) 14 | 15 | This convention is suitable for both cluster- and hive-deployed operators. 16 | 17 | The following components are included: 18 | 19 | ## `make` targets and functions. 20 | 21 | **Note:** Your repository's main `Makefile` needs to be edited to include the 22 | "nexus makefile include": 23 | 24 | ``` 25 | include boilerplate/generated-includes.mk 26 | ``` 27 | 28 | One of the primary purposes of these `make` targets is to allow you to 29 | standardize your prow and app-sre pipeline configurations using the 30 | following: 31 | 32 | ### Prow 33 | 34 | | Test name / `make` target | Purpose | 35 | | ------------------------- | --------------------------------------------------------------------------------------------------------------- | 36 | | `validate` | Ensure code generation has not been forgotten; and ensure generated and boilerplate code has not been modified. | 37 | | `lint` | Perform static analysis. | 38 | | `test` | "Local" unit and functional testing. | 39 | | `coverage` | [Code coverage](#code-coverage) analysis and reporting. | 40 | 41 | To standardize your prow configuration, you may run: 42 | 43 | ```shell 44 | $ make prow-config 45 | ``` 46 | 47 | If you already have the openshift/release repository cloned locally, you 48 | may specify its path via `$RELEASE_CLONE`: 49 | 50 | ```shell 51 | $ make RELEASE_CLONE=/home/me/github/openshift/release prow-config 52 | ``` 53 | 54 | This will generate a delta configuring prow to: 55 | 56 | - Build your `build/Dockerfile`. 57 | - Run the above targets in presubmit tests. 58 | - Run the `coverage` target in a postsubmit. This is the step that 59 | updates your coverage report in codecov.io. 60 | 61 | #### Local Testing 62 | 63 | You can run these `make` targets locally during development to test your 64 | code changes. However, differences in platforms and environments may 65 | lead to unpredictable results. Therefore boilerplate provides a utility 66 | to run targets in a container environment that is designed to be as 67 | similar as possible to CI: 68 | 69 | ```shell 70 | $ make container-{target} 71 | ``` 72 | 73 | or 74 | 75 | ```shell 76 | $ ./boilerplate/_lib/container-make {target} 77 | ``` 78 | 79 | ### app-sre 80 | 81 | The `build-push` target builds and pushes the operator and OLM registry images, 82 | ready to be SaaS-deployed. 
83 | By default it is configured to be run from the app-sre Jenkins pipelines. 84 | Consult [this doc](app-sre.md) for information on local execution/testing. 85 | 86 | ## Code coverage 87 | 88 | - A `codecov.sh` script, referenced by the `coverage` `make` target, to 89 | run code coverage analysis per [this SOP](https://github.com/openshift/ops-sop/blob/93d100347746ce04ad552591136818f82043c648/services/codecov.md). 90 | 91 | - A `.codecov.yml` configuration file for 92 | [codecov.io](https://docs.codecov.io/docs/codecov-yaml). Note that 93 | this is copied into the repository root, because that's 94 | [where codecov.io expects it](https://docs.codecov.io/docs/codecov-yaml#can-i-name-the-file-codecovyml). 95 | 96 | ## Linting and other static analysis with `golangci-lint` 97 | 98 | - A `go-check` `make` target, which 99 | - ensures the proper version of `golangci-lint` is installed, and 100 | - runs it against 101 | - a `golangci.yml` config. 102 | 103 | ## Checks on generated code 104 | 105 | The convention embeds default checks to ensure code generation is current, committed, and unaltered. 106 | To trigger the check, you can use `make generate-check`, provided your Makefile properly includes the boilerplate-generated include `boilerplate/generated-includes.mk`. 107 | 108 | Checks consist of: 109 | 110 | - Checking all files are committed to ensure a safe point to revert to in case of error 111 | - Running the `make generate` command (see below) to regenerate the needed code 112 | - Checking whether this results in any new uncommitted files in the git project or if all is clean. 113 | 114 | `make generate` does the following: 115 | 116 | - generate crds and deepcopy via controller-gen. This is a no-op if your 117 | operator has no APIs. 118 | - `openapi-gen`. This is a no-op if your operator has no APIs. 119 | - `go generate`. This is a no-op if you have no `//go:generate` 120 | directives in your code. 121 | 122 | ## FIPS (Federal Information Processing Standards) 123 | 124 | To enable FIPS in your build, there is a `make ensure-fips` target. 125 | 126 | Add `FIPS_ENABLED=true` to your repo's Makefile. Please ensure that this variable is added **before** including boilerplate Makefiles. 127 | 128 | e.g. 129 | 130 | ```.mk 131 | FIPS_ENABLED=true 132 | 133 | include boilerplate/generated-includes.mk 134 | ``` 135 | 136 | `ensure-fips` will add a [fips.go](./fips.go) file in the same directory as the `main.go` file. (Please commit this file as normal.) 137 | 138 | `fips.go` will import the necessary packages to restrict all TLS configuration to FIPS-approved settings. 139 | 140 | With `FIPS_ENABLED=true`, `ensure-fips` is always run before `make go-build`. 141 | 142 | ## Additional deployment support 143 | 144 | - The convention currently supports a maximum of two deployments, i.e. the operator deployment itself plus an optional additional deployment. 145 | - If an additional deployment image has to be built and appended to the CSV as part of the build process, then the consumer needs to: 146 | - Specify `SupplementaryImage`, which is the deployment name, in the consuming repository's `config/config.go`. 147 | - Define the image to be built as `ADDITIONAL_IMAGE_SPECS` in the consuming repository's Makefile; Boilerplate later parses this image as part of the build process ([ref](https://github.com/openshift/boilerplate/blob/master/boilerplate/openshift/golang-osd-operator/standard.mk#L56)). 148 | 149 | e.g.
150 | 151 | ```.mk 152 | # Additional Deployment Image 153 | define ADDITIONAL_IMAGE_SPECS 154 | build/Dockerfile.webhook $(SUPPLEMENTARY_IMAGE_URI) 155 | endef 156 | ``` 157 | - Ensure the CSV template of the consuming repository has the additional deployment name. 158 | 159 | ## OLM SkipRange 160 | 161 | - OLM currently doesn't support cross-catalog upgrades. 162 | - The convention standardizes the catalog repositories to adhere to the naming convention `${OPERATOR_NAME}-registry`. 163 | - This is a problem for an existing, already-deployed operator looking to onboard Boilerplate: for the deployed operator to upgrade to the new Boilerplate-built operator, which refers to the new catalog registry with `staging`/`production` channels, OLM would need to support cross-catalog upgrades. 164 | - Cross-catalog upgrades are only possible via [OLM SkipRange](https://v0-18-z.olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/#skiprange). 165 | - The consumer can explicitly enable OLM SkipRange for their operator by specifying `EnableOLMSkipRange="true"` in the repository's `config/config.go`. 166 | - If specified, the `olm.skipRange` annotation will be appended to the CSV during the build process, creating an upgrade path for the operator. 167 | -------------------------------------------------------------------------------- /boilerplate/update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # WARNING: THIS FILE IS MANAGED IN THE 'boilerplate' REPO AND COPIED TO OTHER REPOSITORIES. 4 | # ONLY EDIT THIS FILE FROM WITHIN THE 'boilerplate' REPOSITORY. 5 | # 6 | # TO OPT OUT OF UPDATES, SEE THE README. 7 | 8 | # This script updates itself, and then re-execs itself if it has 9 | # changed. This is in case updates to conventions rely on updates to this 10 | # script. 11 | 12 | set -e 13 | if [ "$BOILERPLATE_SET_X" ]; then 14 | set -x 15 | fi 16 | 17 | # The directory in which this script lives is the CONVENTION_ROOT. Export 18 | # this for individual `update` scripts. 19 | export CONVENTION_ROOT="$(realpath $(cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ))" 20 | CONFIG_FILE="${CONVENTION_ROOT}/update.cfg" 21 | 22 | # Set SED variable 23 | if LANG=C sed --help 2>&1 | grep -q GNU; then 24 | SED="sed" 25 | elif command -v gsed &>/dev/null; then 26 | SED="gsed" 27 | else 28 | echo "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2 29 | exit 1 30 | fi 31 | 32 | # scrubbed_conventions 33 | # Parses the $CONFIG_FILE and outputs a space-delimited list of conventions. 34 | scrubbed_conventions() { 35 | local conventions= 36 | 37 | while read convention junk; do 38 | # Skip comment lines (which can have leading whitespace) 39 | if [[ "$convention" == '#'* ]]; then 40 | continue 41 | fi 42 | 43 | # Skip blank or whitespace-only lines 44 | if [[ "$convention" == "" ]]; then 45 | continue 46 | fi 47 | 48 | # Lines like 49 | # valid/path other_junk 50 | # are not acceptable, unless `other_junk` is a comment 51 | if [[ "$junk" != "" ]] && [[ "$junk" != '#'* ]]; then 52 | echo "Invalid config! Only one convention is allowed per line. Found '$junk'" 53 | exit 1 54 | fi 55 | 56 | # This one is legit 57 | echo $convention 58 | 59 | done < "$CONFIG_FILE" 60 | } 61 | 62 | ## unacceptable_deltas [GREP_FLAGS] 63 | # 64 | # Looks for uncommitted changes in the current checkout, ignoring certain 65 | # things as noted below.
66 | # 67 | # If changes are found, they are printed (à la `git status --porcelain`) 68 | # and the function returns nonzero. 69 | # 70 | # If no changes are found, the function is silent and returns zero. 71 | # 72 | # Ignores: 73 | # This function ignores uncommitted changes to: 74 | # - Makefile: Editing this file is part of bootstrapping, and we don't 75 | # want to force an additional commit in the bootstrapping process. 76 | # - .gitattributes: This file is created as part of the bootstrapping 77 | # process. See above. 78 | # - ?? boilerplate/: I.e. the boilerplate subdirectory is brand new, 79 | # meaning you're bootstrapping. See above. 80 | # - boilerplate/update.cfg: Changing this file prior to an update is 81 | # part of the process of subscribing to new conventions. 82 | unacceptable_deltas() { 83 | ignores="Makefile|.gitattributes|boilerplate/(update\.cfg)?" 84 | git status --porcelain | grep -E -v $1 " ($ignores)$" 85 | } 86 | 87 | # We're initially invoked with no arguments. The branch below clones the 88 | # boilerplate repo at the latest level into a temporary directory and copies in 89 | # the update script (this script) and utilities back into the consuming repo. 90 | # Then it re-execs this script with the temporary directory as a CLI argument. 91 | if [[ -z "$1" ]]; then 92 | if unacceptable_deltas -q; then 93 | cat <"${NEXUS_MK}" 171 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 172 | # This file automatically includes any *.mk files in your subscribed 173 | # conventions. Please ensure your base Makefile includes only this file. 174 | include boilerplate/_lib/boilerplate.mk 175 | EOF 176 | 177 | for convention in $(scrubbed_conventions $CONFIG_FILE); do 178 | dir_path="${BP_CLONE}/boilerplate/${convention}" 179 | # Make sure the directory exists 180 | if ! [[ -d "$dir_path" ]]; then 181 | echo "Invalid convention directory: '$convention'" 182 | exit 1 183 | fi 184 | 185 | echo "***********************************************************************************" 186 | echo "$convention is configured in update.cfg." 187 | echo "-----------------------------------------------------------------------------------" 188 | echo "syncing files from source." 189 | if [ -f "${dir_path}/update" ]; then 190 | # Always run the *new* update script 191 | mkdir -p "${CONVENTION_ROOT}/${convention}" 192 | rsync -a "${dir_path}/update" "${CONVENTION_ROOT}/${convention}" 193 | echo "executing ${CONVENTION_ROOT}/${convention}/update PRE" 194 | "${CONVENTION_ROOT}/${convention}/update" PRE 195 | fi 196 | rm -rf "${CONVENTION_ROOT}/${convention}" 197 | mkdir -p $(dirname "${CONVENTION_ROOT}/${convention}") 198 | rsync -a -r -L --delete "$dir_path" $(dirname "${CONVENTION_ROOT}/${convention}") 199 | if [ -f "${CONVENTION_ROOT}/${convention}/update" ]; then 200 | echo "executing ${CONVENTION_ROOT}/${convention}/update POST" 201 | "${CONVENTION_ROOT}/${convention}/update" POST 202 | fi 203 | echo "adding makefile includes." 
204 | for inc in $(find "${CONVENTION_ROOT}/${convention}" -type f -name '*.mk' | sort); do 205 | echo "include ${inc#$REPO_ROOT/}" >> "${NEXUS_MK}" 206 | done 207 | echo "***********************************************************************************" 208 | echo "" 209 | done 210 | 211 | # (Create and) edit .gitattributes to 212 | # - override hiding boilerplate files related to freeze-check (so they 213 | # can't be hacked without you seeing it in the PR by default) 214 | # - unhide .gitattributes itself (so these rules can't be changed 215 | # without you seeing it in the PR by default) 216 | echo "Processing .gitattributes" 217 | gitattributes=${REPO_ROOT}/.gitattributes 218 | if [[ -f "${gitattributes}" ]]; then 219 | # Delete the previously generated section 220 | ${SED?} -i '/BEGIN BOILERPLATE GENERATED/,/END BOILERPLATE GENERATED/d' "${gitattributes}" 221 | fi 222 | # .gitattributes is processed in top-down order. Putting these entries at the 223 | # end ensures they take precedence over earlier, manual entries. 224 | cat <<'EOF'>>"${gitattributes}" 225 | ### BEGIN BOILERPLATE GENERATED -- DO NOT EDIT ### 226 | ### This block must be the last thing in your ### 227 | ### .gitattributes file; otherwise the 'validate' ### 228 | ### CI check will fail. ### 229 | # Used to ensure nobody mucked with boilerplate files. 230 | boilerplate/_lib/freeze-check linguist-generated=false 231 | # Show the boilerplate commit hash update. It's only one line anyway. 232 | boilerplate/_data/last-boilerplate-commit linguist-generated=false 233 | # Used by freeze-check. Good place for attackers to inject badness. 234 | boilerplate/update linguist-generated=false 235 | # Make sure attackers can't hide changes to this configuration 236 | .gitattributes linguist-generated=false 237 | ### END BOILERPLATE GENERATED ### 238 | EOF 239 | 240 | # If all that went well, record some metadata. 241 | mkdir -p ${CONVENTION_ROOT}/_data 242 | # - The last-boilerplate-commit file, which allows freeze-check to work. 243 | echo "Registering commit hash..." 244 | bp_commit=$(cd ${BP_CLONE} && git rev-parse HEAD) 245 | echo ${bp_commit} > ${CONVENTION_ROOT}/_data/last-boilerplate-commit 246 | 247 | # - The boilerplate backing image tag. This is used to run containerized 248 | # local builds/tests. 249 | echo "Registering image tag..." 250 | echo $LATEST_IMAGE_TAG > ${CONVENTION_ROOT}/_data/backing-image-tag 251 | 252 | echo "Done" 253 | --------------------------------------------------------------------------------
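For reference, a minimal sketch of invoking the update flow above from a consuming repository's root (per the script's own comments it takes no arguments on first invocation; `BOILERPLATE_SET_X` is the tracing knob honored by these scripts):

```terminal
export BOILERPLATE_SET_X=1   # optional: trace execution with `set -x`
./boilerplate/update
```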