├── boilerplate ├── _data │ ├── backing-image-tag │ └── last-boilerplate-commit ├── update.cfg ├── openshift │ └── golang-osd-e2e │ │ ├── OWNERS │ │ ├── project.mk │ │ ├── e2e-template.yml │ │ ├── README.md │ │ ├── standard.mk │ │ └── update ├── _lib │ ├── boilerplate.mk │ ├── subscriber │ ├── subscriber-propose │ ├── subscriber-report │ ├── subscriber-report-onboarding │ ├── subscriber-report-pr │ ├── container-make │ ├── freeze-check │ ├── subscriber-propose-update │ ├── subscriber-report-release │ ├── release.sh │ └── subscriber.sh └── generated-includes.mk ├── .gitignore ├── config ├── package │ ├── managed-cluster-validating-webhooks-package.Containerfile │ └── manifest.yaml └── config.go ├── .github ├── renovate.json └── dependabot.yml ├── pkg ├── webhooks │ ├── add_node.go │ ├── add_pod.go │ ├── add_scc.go │ ├── add_hcpnamespace.go │ ├── add_service_hook.go │ ├── add_namespace_hook.go │ ├── add_regularuser.go │ ├── add_clusterrole.go │ ├── add_hiveownership.go │ ├── add_hostedcluster.go │ ├── add_manifestworks.go │ ├── add_podimagespec.go │ ├── add_sdnmigration.go │ ├── add_clusterlogging.go │ ├── add_ingressconfig_hook.go │ ├── add_prometheusrule.go │ ├── add_serviceaccount.go │ ├── add_networkpolicy.go │ ├── add_clusterrolebinding.go │ ├── add_hostedcontrolplane.go │ ├── add_ingresscontroller.go │ ├── add_imagecontentpolicies.go │ ├── add_techpreviewnoupgrade.go │ ├── add_customresourcedefinitions.go │ ├── utils │ │ ├── utils_test.go │ │ └── utils.go │ ├── register.go │ ├── techpreviewnoupgrade │ │ ├── techpreviewnoupgrade_test.go │ │ └── techpreviewnoupgrade.go │ ├── clusterlogging │ │ └── clusterlogging_test.go │ ├── serviceaccount │ │ └── serviceaccount_test.go │ ├── hostedcluster │ │ ├── hostedcluster_test.go │ │ └── hostedcluster.go │ ├── hostedcontrolplane │ │ ├── hostedcontrolplane_test.go │ │ └── hostedcontrolplane.go │ ├── clusterrole │ │ └── clusterrole_test.go │ ├── manifestworks │ │ ├── manifestworks_test.go │ │ └── manifestworks.go │ ├── 
hiveownership │ │ ├── hiveownership.go │ │ └── hiveownership_test.go │ ├── ingressconfig │ │ └── ingressconfig.go │ ├── hcpnamespace │ │ └── hcpnamespace_test.go │ └── scc │ │ └── scc_test.go ├── config │ ├── config.go │ ├── namespaces.go │ └── generate │ │ └── namespaces.go ├── localmetrics │ └── localmetrics.go ├── helpers │ ├── response.go │ └── response_test.go ├── k8sutil │ └── k8sutil.go ├── dispatcher │ └── dispatcher.go ├── testutils │ └── testutils.go └── syncset │ └── syncsetbylabelselector.go ├── OWNERS ├── cmd ├── fips.go └── main.go ├── hack ├── test.sh ├── documentation │ └── document.go └── templates │ └── 00-managed-cluster-validating-webhooks-hs.SelectorSyncSet.yaml.tmpl ├── .gitattributes ├── test └── e2e │ ├── README.md │ ├── validation_webhook_runner_test.go │ ├── Dockerfile │ └── e2e-template.yml ├── OWNERS_ALIASES ├── go.mod ├── docs ├── hypershift.md └── webhooks-short.json └── CLAUDE.md /boilerplate/_data/backing-image-tag: -------------------------------------------------------------------------------- 1 | image-v8.2.0 2 | -------------------------------------------------------------------------------- /boilerplate/update.cfg: -------------------------------------------------------------------------------- 1 | openshift/golang-osd-e2e 2 | -------------------------------------------------------------------------------- /boilerplate/_data/last-boilerplate-commit: -------------------------------------------------------------------------------- 1 | 27b681eaa783ae7183c53546a7194162a3041fa0 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .coverage 2 | /build/_output 3 | *.out 4 | /coverage.txt 5 | /.vscode 6 | *.code-workspace 7 | .idea 8 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/OWNERS: 
-------------------------------------------------------------------------------- 1 | reviewers: 2 | - srep-infra-cicd 3 | approvers: 4 | - srep-infra-cicd 5 | -------------------------------------------------------------------------------- /config/package/managed-cluster-validating-webhooks-package.Containerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | ADD config/package/*.yaml* /package/ 4 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>openshift/boilerplate//.github/renovate.json" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /boilerplate/_lib/boilerplate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: boilerplate-commit 2 | boilerplate-commit: 3 | @boilerplate/_lib/boilerplate-commit 4 | 5 | .PHONY: boilerplate-freeze-check 6 | boilerplate-freeze-check: 7 | @boilerplate/_lib/freeze-check 8 | -------------------------------------------------------------------------------- /pkg/webhooks/add_node.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/node" 4 | 5 | func init() { 6 | Register(node.WebhookName, func() Webhook { return node.NewWebhook() }) 7 | } 8 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - joshbranham 3 | - srep-functional-team-rocket 4 | - srep-functional-team-thor 5 | 6 | approvers: 7 | - joshbranham 8 | - tnierman 9 | - srep-functional-leads 10 | - srep-team-leads 
11 | - srep-functional-team-thor 12 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | const ( 4 | // I know this isn't the operator's name but so much stuff has been coded to use this... 5 | OperatorName = "validation-webhook" 6 | OperatorNamespace = "openshift-validation-webhook" 7 | ) 8 | -------------------------------------------------------------------------------- /pkg/webhooks/add_pod.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/pod" 5 | ) 6 | 7 | func init() { 8 | Register(pod.WebhookName, func() Webhook { return pod.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_scc.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/scc" 5 | ) 6 | 7 | func init() { 8 | Register(scc.WebhookName, func() Webhook { return scc.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hcpnamespace.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import webhooks "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hcpnamespace" 4 | 5 | func init() { 6 | Register(webhooks.WebhookName, func() Webhook { return webhooks.NewWebhook() }) 7 | } 8 | -------------------------------------------------------------------------------- /pkg/webhooks/add_service_hook.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | 
"github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/service" 5 | ) 6 | 7 | func init() { 8 | Register(service.WebhookName, func() Webhook { return service.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_namespace_hook.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/namespace" 5 | ) 6 | 7 | func init() { 8 | Register(namespace.WebhookName, func() Webhook { return namespace.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_regularuser.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/regularuser/common" 5 | ) 6 | 7 | func init() { 8 | Register(common.WebhookName, func() Webhook { return common.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_clusterrole.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/clusterrole" 5 | ) 6 | 7 | func init() { 8 | Register(clusterrole.WebhookName, func() Webhook { return clusterrole.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hiveownership.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hiveownership" 5 | ) 6 | 7 | func init() { 8 | Register(hiveownership.WebhookName, func() Webhook { return hiveownership.NewWebhook() }) 9 | 
} 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hostedcluster.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hostedcluster" 5 | ) 6 | 7 | func init() { 8 | Register(hostedcluster.WebhookName, func() Webhook { return hostedcluster.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_manifestworks.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/manifestworks" 5 | ) 6 | 7 | func init() { 8 | Register(manifestworks.WebhookName, func() Webhook { return manifestworks.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_podimagespec.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/podimagespec" 5 | ) 6 | 7 | func init() { 8 | Register(podimagespec.WebhookName, func() Webhook { return podimagespec.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_sdnmigration.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/sdnmigration" 5 | ) 6 | 7 | func init() { 8 | Register(sdnmigration.WebhookName, func() Webhook { return sdnmigration.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_clusterlogging.go: 
-------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/clusterlogging" 5 | ) 6 | 7 | func init() { 8 | Register(clusterlogging.WebhookName, func() Webhook { return clusterlogging.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_ingressconfig_hook.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/ingressconfig" 5 | ) 6 | 7 | func init() { 8 | Register(ingressconfig.WebhookName, func() Webhook { return ingressconfig.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_prometheusrule.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/prometheusrule" 5 | ) 6 | 7 | func init() { 8 | Register(prometheusrule.WebhookName, func() Webhook { return prometheusrule.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_serviceaccount.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/serviceaccount" 5 | ) 6 | 7 | func init() { 8 | Register(serviceaccount.WebhookName, func() Webhook { return serviceaccount.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_networkpolicy.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | 
"github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/networkpolicies" 5 | ) 6 | 7 | func init() { 8 | Register(networkpolicies.WebhookName, func() Webhook { return networkpolicies.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_clusterrolebinding.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/clusterrolebinding" 5 | ) 6 | 7 | func init() { 8 | Register(clusterrolebinding.WebhookName, func() Webhook { return clusterrolebinding.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hostedcontrolplane.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hostedcontrolplane" 5 | ) 6 | 7 | func init() { 8 | Register(hostedcontrolplane.WebhookName, func() Webhook { return hostedcontrolplane.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_ingresscontroller.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/ingresscontroller" 5 | ) 6 | 7 | func init() { 8 | Register(ingresscontroller.WebhookName, func() Webhook { return ingresscontroller.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | //go:generate go run ./generate/namespaces.go 4 | import ( 5 | 
"github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 6 | ) 7 | 8 | func IsPrivilegedNamespace(ns string) bool { 9 | return utils.RegexSliceContains(ns, PrivilegedNamespaces) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/webhooks/add_imagecontentpolicies.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/imagecontentpolicies" 5 | ) 6 | 7 | func init() { 8 | Register(imagecontentpolicies.WebhookName, func() Webhook { return imagecontentpolicies.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_techpreviewnoupgrade.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/techpreviewnoupgrade" 5 | ) 6 | 7 | func init() { 8 | Register(techpreviewnoupgrade.WebhookName, func() Webhook { return techpreviewnoupgrade.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_customresourcedefinitions.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/customresourcedefinitions" 5 | ) 6 | 7 | func init() { 8 | Register(customresourcedefinitions.WebhookName, func() Webhook { return customresourcedefinitions.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /cmd/fips.go: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 | // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make 
ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | 4 | E2E_IMAGE_REGISTRY?=quay.io 5 | E2E_IMAGE_REPOSITORY?=app-sre 6 | E2E_IMAGE_NAME?=$(OPERATOR_NAME)-e2e 7 | 8 | 9 | REGISTRY_USER?=$(QUAY_USER) 10 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 11 | -------------------------------------------------------------------------------- /hack/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd $(dirname $0)/../ 6 | 7 | echo "" > coverage.txt 8 | 9 | for d in $(go list ./... 
| grep -v vendor); do 10 | go test -race -coverprofile=profile.out -covermode=atomic $d 11 | if [ -f profile.out ]; then 12 | cat profile.out >> coverage.txt 13 | rm profile.out 14 | fi 15 | done -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | [propose]='Propose pull/merge requests for subscribers' 9 | [report]='Print information about subscribers' 10 | ) 11 | 12 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 13 | -------------------------------------------------------------------------------- /boilerplate/generated-includes.mk: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | # This file automatically includes any *.mk files in your subscribed 3 | # conventions. Please ensure your base Makefile includes only this file. 
4 | include boilerplate/_lib/boilerplate.mk 5 | include boilerplate/openshift/golang-osd-e2e/project.mk 6 | include boilerplate/openshift/golang-osd-e2e/standard.mk 7 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-propose: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | # TODO: 9 | # [bootstrap]='Bootstrap a new subscriber' 10 | # [prow-config]='Propose standardized prow configuration to openshift/release' 11 | [update]='Update an already-onboarded subscriber' 12 | ) 13 | 14 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 15 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "docker" 4 | directory: "/build" 5 | labels: 6 | - "area/dependency" 7 | - "ok-to-test" 8 | schedule: 9 | interval: "weekly" 10 | ignore: 11 | - dependency-name: "app-sre/boilerplate" 12 | # don't upgrade boilerplate via these means 13 | - dependency-name: "openshift/origin-operator-registry" 14 | # don't upgrade origin-operator-registry via these means 15 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-report: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | declare -A SUBCOMMANDS 7 | SUBCOMMANDS=( 8 | [onboarding]='Prints a CSV report of onboarded boilerplate subscribers.' 9 | [pr]='Finds boilerplate-related pull requests for registered subscribers.' 
10 | [release]='Checks openshift/release configuration for onboarded subscribers.' 11 | ) 12 | 13 | source $REPO_ROOT/boilerplate/_lib/subscriber.sh 14 | -------------------------------------------------------------------------------- /pkg/localmetrics/localmetrics.go: -------------------------------------------------------------------------------- 1 | package localmetrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | ) 6 | 7 | var ( 8 | MetricNodeWebhookBlockedReqeust = prometheus.NewCounterVec(prometheus.CounterOpts{ 9 | Name: "managed_webhook_node_blocked_request", 10 | Help: "Report how many times the managed node webhook has blocked requests", 11 | }, []string{"user"}) 12 | 13 | MetricsList = []prometheus.Collector{ 14 | MetricNodeWebhookBlockedReqeust, 15 | } 16 | ) 17 | 18 | func IncrementNodeWebhookBlockedRequest(user string) { 19 | MetricNodeWebhookBlockedReqeust.With(prometheus.Labels{"user": user}).Inc() 20 | } 21 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | ### BEGIN BOILERPLATE GENERATED -- DO NOT EDIT ### 2 | ### This block must be the last thing in your ### 3 | ### .gitattributes file; otherwise the 'validate' ### 4 | ### CI check will fail. ### 5 | # Used to ensure nobody mucked with boilerplate files. 6 | boilerplate/_lib/freeze-check linguist-generated=false 7 | # Show the boilerplate commit hash update. It's only one line anyway. 8 | boilerplate/_data/last-boilerplate-commit linguist-generated=false 9 | # Used by freeze-check. Good place for attackers to inject badness. 
10 | boilerplate/update linguist-generated=false 11 | # Make sure attackers can't hide changes to this configuration 12 | .gitattributes linguist-generated=false 13 | ### END BOILERPLATE GENERATED ### 14 | -------------------------------------------------------------------------------- /test/e2e/README.md: -------------------------------------------------------------------------------- 1 | ## Locally running e2e test suite 2 | When updating your operator it's beneficial to add e2e tests for new functionality AND ensure existing functionality is not breaking using e2e tests. 3 | To do this, following steps are recommended 4 | 5 | 1. Run "make e2e-binary-build" to make sure e2e tests build 6 | 2. Deploy your new version of operator in a test cluster 7 | 3. Run "go install github.com/onsi/ginkgo/ginkgo@latest" 8 | 4. Get kubeadmin credentials from your cluster using 9 | 10 | ocm get /api/clusters_mgmt/v1/clusters/(cluster-id)/credentials | jq -r .kubeconfig > /(path-to)/kubeconfig 11 | 12 | 5. Run test suite using 13 | 14 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 15 | -------------------------------------------------------------------------------- /test/e2e/validation_webhook_runner_test.go: -------------------------------------------------------------------------------- 1 | // THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | //go:build osde2e 3 | 4 | package osde2etests 5 | 6 | import ( 7 | "os" 8 | "path/filepath" 9 | "testing" 10 | 11 | . "github.com/onsi/ginkgo/v2" 12 | . "github.com/onsi/gomega" 13 | ) 14 | 15 | const ( 16 | testResultsDirectory = "/test-run-results" 17 | jUnitOutputFilename = "junit-validation-webhook.xml" 18 | ) 19 | 20 | // Test entrypoint. osde2e runs this as a test suite on test pod. 
21 | func TestValidationWebhook(t *testing.T) { 22 | RegisterFailHandler(Fail) 23 | suiteConfig, reporterConfig := GinkgoConfiguration() 24 | if _, ok := os.LookupEnv("DISABLE_JUNIT_REPORT"); !ok { 25 | reporterConfig.JUnitReport = filepath.Join(testResultsDirectory, jUnitOutputFilename) 26 | } 27 | RunSpecs(t, "Validation Webhook", suiteConfig, reporterConfig) 28 | } 29 | -------------------------------------------------------------------------------- /config/package/manifest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: manifests.package-operator.run/v1alpha1 2 | kind: PackageManifest 3 | metadata: 4 | name: validation-webhook 5 | spec: 6 | scopes: 7 | - Namespaced 8 | phases: 9 | - name: config 10 | - name: rbac 11 | - name: deploy 12 | - name: webhooks 13 | class: hosted-cluster 14 | config: 15 | openAPIV3Schema: 16 | properties: 17 | serviceca: 18 | description: Service Certificate Authority used for webhook client authentication 19 | type: string 20 | required: 21 | - serviceca 22 | type: object 23 | availabilityProbes: 24 | - probes: 25 | - condition: 26 | type: Available 27 | status: "True" 28 | - fieldsEqual: 29 | fieldA: .status.updatedReplicas 30 | fieldB: .status.replicas 31 | selector: 32 | kind: 33 | group: apps 34 | kind: Deployment 35 | -------------------------------------------------------------------------------- /test/e2e/Dockerfile: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | FROM registry.access.redhat.com/ubi9/go-toolset:1.24 as builder 3 | WORKDIR /opt/app-root/src 4 | COPY . . 
5 | RUN CGO_ENABLED=0 GOFLAGS="-mod=mod" go test ./test/e2e -v -c --tags=osde2e -o e2e.test 6 | 7 | FROM registry.access.redhat.com/ubi8/ubi-minimal:latest 8 | COPY --from=builder /opt/app-root/src/e2e.test e2e.test 9 | 10 | LABEL com.redhat.component="managed-cluster-validating-webhooks-e2e-container" \ 11 | name="managed-cluster-validating-webhooks-e2e" \ 12 | version="1.0" \ 13 | release="1" \ 14 | summary="E2E tests for Managed Cluster Validating Webhooks" \ 15 | description="End-to-end tests for validating admission webhooks for OpenShift" \ 16 | io.k8s.description="End-to-end tests for validating admission webhooks for OpenShift" \ 17 | io.k8s.display-name="Managed Cluster Validating Webhooks E2E" \ 18 | io.openshift.tags="openshift,webhooks,validation,e2e,tests" 19 | 20 | ENTRYPOINT [ "/e2e.test" ] 21 | -------------------------------------------------------------------------------- /pkg/helpers/response.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "net/http" 7 | 8 | admissionapi "k8s.io/api/admission/v1" 9 | logf "sigs.k8s.io/controller-runtime/pkg/log" 10 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 11 | ) 12 | 13 | var log = logf.Log.WithName("response_helper") 14 | 15 | // SendResponse Send the AdmissionReview. 16 | func SendResponse(w io.Writer, resp admissionctl.Response) { 17 | 18 | // Apply ownership annotation to allow for granular alerts for 19 | // manipulation of SREP owned webhooks. 
20 | resp.AuditAnnotations = map[string]string{ 21 | "owner": "srep-managed-webhook", 22 | } 23 | 24 | encoder := json.NewEncoder(w) 25 | responseAdmissionReview := admissionapi.AdmissionReview{ 26 | Response: &resp.AdmissionResponse, 27 | } 28 | responseAdmissionReview.APIVersion = admissionapi.SchemeGroupVersion.String() 29 | responseAdmissionReview.Kind = "AdmissionReview" 30 | err := encoder.Encode(responseAdmissionReview) 31 | // TODO (lisa): handle this in a non-recursive way (why would the second one succeed)? 32 | if err != nil { 33 | log.Error(err, "Failed to encode Response", "response", resp) 34 | SendResponse(w, admissionctl.Errored(http.StatusInternalServerError, err)) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-report-onboarding: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | usage() { 7 | cat </dev/null 52 | -------------------------------------------------------------------------------- /OWNERS_ALIASES: -------------------------------------------------------------------------------- 1 | # ================================ NOTICE ==================================== 2 | # This file is sourced from https://github.com/openshift/boilerplate 3 | # However, this repository is not currently subscribed to boilerplate, so manual updates are required 4 | # See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 5 | # ============================================================================= 6 | aliases: 7 | srep-functional-team-aurora: 8 | - abyrne55 9 | - dakotalongRH 10 | - joshbranham 11 | - luis-falcon 12 | - reedcort 13 | srep-functional-team-fedramp: 14 | - tonytheleg 15 | - theautoroboto 16 | - rhdedgar 17 | - katherinelc321 18 | - rojasreinold 19 | - 
fsferraz-rh 20 | srep-functional-team-hulk: 21 | - a7vicky 22 | - ravitri 23 | - shitaljante 24 | - devppratik 25 | - Tafhim 26 | - tkong-redhat 27 | - TheUndeadKing 28 | - vaidehi411 29 | - chamalabey 30 | srep-functional-team-orange: 31 | - bergmannf 32 | - Makdaam 33 | - Nikokolas3270 34 | - RaphaelBut 35 | - MateSaary 36 | - rolandmkunkel 37 | - petrkotas 38 | - zmird-r 39 | - evlin-rh 40 | - hectorakemp 41 | srep-functional-team-rocket: 42 | - aliceh 43 | - anispate 44 | - clcollins 45 | - Mhodesty 46 | - nephomaniac 47 | - tnierman 48 | srep-functional-team-security: 49 | - jaybeeunix 50 | - sam-nguyen7 51 | - wshearn 52 | - dem4gus 53 | - npecka 54 | - pshickeydev 55 | - casey-williams-rh 56 | - boranx 57 | srep-functional-team-thor: 58 | - bmeng 59 | - diakovnec 60 | - MitaliBhalla 61 | - feichashao 62 | - samanthajayasinghe 63 | - xiaoyu74 64 | - Dee-6777 65 | - Tessg22 66 | - smarthall 67 | srep-infra-cicd: 68 | - mmazur 69 | - mrsantamaria 70 | - ritmun 71 | - jbpratt 72 | - yiqinzhang 73 | srep-functional-leads: 74 | - abyrne55 75 | - clcollins 76 | - Nikokolas3270 77 | - theautoroboto 78 | - smarthall 79 | - sam-nguyen7 80 | - ravitri 81 | srep-team-leads: 82 | - rafael-azevedo 83 | - iamkirkbater 84 | - rogbas 85 | - dustman9000 86 | - wanghaoran1988 87 | - bng0y 88 | - bmeng 89 | - typeid 90 | sre-group-leads: 91 | - apahim 92 | - maorfr 93 | - rogbas 94 | srep-architects: 95 | - jharrington22 96 | - cblecker 97 | -------------------------------------------------------------------------------- /pkg/k8sutil/k8sutil.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | "k8s.io/apimachinery/pkg/runtime" 9 | "k8s.io/client-go/rest" 10 | "k8s.io/client-go/tools/clientcmd" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | logf "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | type RunModeType string 16 | 17 | const ( 18 | LocalRunMode 
RunModeType = "local" 19 | ClusterRunMode RunModeType = "cluster" 20 | 21 | OperatorNameEnvVar = "OPERATOR_NAME" 22 | ) 23 | 24 | var ( 25 | log = logf.Log.WithName("k8sutil") 26 | 27 | ForceRunModeEnv = "OSDK_FORCE_RUN_MODE" 28 | ErrNoNamespace = fmt.Errorf("namespace not found for current environment") 29 | ErrRunLocal = fmt.Errorf("operator run mode forced to local") 30 | ) 31 | 32 | func buildConfig(kubeconfig string) (*rest.Config, error) { 33 | // Try loading KUBECONFIG env var. If not set fallback on InClusterConfig 34 | 35 | if kubeconfig != "" { 36 | cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 37 | if err != nil { 38 | return nil, err 39 | } 40 | return cfg, nil 41 | } 42 | 43 | cfg, err := rest.InClusterConfig() 44 | if err != nil { 45 | return nil, err 46 | } 47 | return cfg, nil 48 | } 49 | 50 | // KubeClient creates a new kubeclient that interacts with the Kube api with the service account secrets 51 | func KubeClient(s *runtime.Scheme) (client.Client, error) { 52 | // Try loading KUBECONFIG env var. Else falls back on in-cluster config 53 | config, err := buildConfig(os.Getenv("KUBECONFIG")) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | c, err := client.New(config, client.Options{ 59 | Scheme: s, 60 | }) 61 | if err != nil { 62 | return nil, err 63 | } 64 | return c, nil 65 | } 66 | 67 | func isRunModeLocal() bool { 68 | return os.Getenv(ForceRunModeEnv) == string(LocalRunMode) 69 | } 70 | 71 | // GetOperatorNamespace returns the namespace the operator should be running in. 
72 | func GetOperatorNamespace() (string, error) { 73 | if isRunModeLocal() { 74 | return "", ErrRunLocal 75 | } 76 | nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") 77 | if err != nil { 78 | if os.IsNotExist(err) { 79 | return "", ErrNoNamespace 80 | } 81 | return "", err 82 | } 83 | ns := strings.TrimSpace(string(nsBytes)) 84 | log.V(1).Info("Found namespace", "Namespace", ns) 85 | return ns, nil 86 | } 87 | 88 | // GetOperatorName return the operator name 89 | func GetOperatorName() (string, error) { 90 | operatorName, found := os.LookupEnv(OperatorNameEnvVar) 91 | if !found { 92 | return "", fmt.Errorf("%s must be set", OperatorNameEnvVar) 93 | } 94 | if len(operatorName) == 0 { 95 | return "", fmt.Errorf("%s must not be empty", OperatorNameEnvVar) 96 | } 97 | return operatorName, nil 98 | } 99 | -------------------------------------------------------------------------------- /pkg/webhooks/register.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | admissionregv1 "k8s.io/api/admissionregistration/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 7 | ) 8 | 9 | type RegisteredWebhooks map[string]WebhookFactory 10 | 11 | // Webhooks are all registered webhooks mapping name to hook 12 | var Webhooks = RegisteredWebhooks{} 13 | 14 | // Webhook interface 15 | type Webhook interface { 16 | // Authorized will determine if the request is allowed 17 | Authorized(request admissionctl.Request) admissionctl.Response 18 | // GetURI returns the URI for the webhook 19 | GetURI() string 20 | // Validate will validate the incoming request 21 | Validate(admissionctl.Request) bool 22 | // Name is the name of the webhook 23 | Name() string 24 | // FailurePolicy is how the hook config should react if k8s can't access it 25 | // 
https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy 26 | FailurePolicy() admissionregv1.FailurePolicyType 27 | // MatchPolicy mirrors validatingwebhookconfiguration.webhooks[].matchPolicy. 28 | // If it is important to the webhook, be sure to check subResource vs 29 | // requestSubResource. 30 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy 31 | MatchPolicy() admissionregv1.MatchPolicyType 32 | // Rules is a slice of rules on which this hook should trigger 33 | Rules() []admissionregv1.RuleWithOperations 34 | // ObjectSelector uses a *metav1.LabelSelector to augment the webhook's 35 | // Rules() to match only on incoming requests which match the specific 36 | // LabelSelector. 37 | ObjectSelector() *metav1.LabelSelector 38 | // SideEffects are what side effects, if any, this hook has. Refer to 39 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#side-effects 40 | SideEffects() admissionregv1.SideEffectClass 41 | // TimeoutSeconds returns an int32 representing how long to wait for this hook to complete 42 | // The timeout value must be between 1 and 30 seconds. 43 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#timeouts 44 | TimeoutSeconds() int32 45 | // Doc returns a string for end-customer documentation purposes. 46 | Doc() string 47 | // SyncSetLabelSelector returns the label selector to use in the SyncSet. 
48 | // Return utils.DefaultLabelSelector() to stick with the default 49 | SyncSetLabelSelector() metav1.LabelSelector 50 | // ClassicEnabled will return true if the webhook should be deployed to OSD/ROSA Classic clusters 51 | ClassicEnabled() bool 52 | // HypershiftEnabled will return true if the webhook should be deployed to ROSA HCP clusters 53 | HypershiftEnabled() bool 54 | } 55 | 56 | // WebhookFactory return a kind of Webhook 57 | type WebhookFactory func() Webhook 58 | 59 | // Register webhooks 60 | func Register(name string, input WebhookFactory) { 61 | Webhooks[name] = input 62 | } 63 | -------------------------------------------------------------------------------- /test/e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-validation-webhook-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: 
quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - --configs 58 | - ${OSDE2E_CONFIGS} 59 | securityContext: 60 | runAsNonRoot: true 61 | allowPrivilegeEscalation: false 62 | capabilities: 63 | drop: ["ALL"] 64 | seccompProfile: 65 | type: RuntimeDefault 66 | env: 67 | - name: AD_HOC_TEST_IMAGES 68 | value: ${TEST_IMAGE}:${IMAGE_TAG} 69 | - name: OCM_CLIENT_ID 70 | value: ${OCM_CLIENT_ID} 71 | - name: OCM_CLIENT_SECRET 72 | value: ${OCM_CLIENT_SECRET} 73 | - name: OCM_CCS 74 | value: ${OCM_CCS} 75 | - name: AWS_ACCESS_KEY_ID 76 | value: ${AWS_ACCESS_KEY_ID} 77 | - name: AWS_SECRET_ACCESS_KEY 78 | value: ${AWS_SECRET_ACCESS_KEY} 79 | - name: CLOUD_PROVIDER_REGION 80 | value: ${CLOUD_PROVIDER_REGION} 81 | - name: GCP_CREDS_JSON 82 | value: ${GCP_CREDS_JSON} 83 | - name: LOG_BUCKET 84 | value: ${LOG_BUCKET} 85 | - name: USE_EXISTING_CLUSTER 86 | value: ${USE_EXISTING_CLUSTER} 87 | - name: CAD_PAGERDUTY_ROUTING_KEY 88 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 89 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 
2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-${OPERATOR_NAME}-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - --configs 58 | - ${OSDE2E_CONFIGS} 59 | securityContext: 60 | runAsNonRoot: true 61 | allowPrivilegeEscalation: false 62 | capabilities: 63 | drop: ["ALL"] 64 | seccompProfile: 65 | type: RuntimeDefault 66 | env: 67 | - name: AD_HOC_TEST_IMAGES 68 | value: ${TEST_IMAGE}:${IMAGE_TAG} 69 | - name: OCM_CLIENT_ID 70 | value: ${OCM_CLIENT_ID} 71 | - name: OCM_CLIENT_SECRET 72 | value: ${OCM_CLIENT_SECRET} 73 | - name: OCM_CCS 74 | value: ${OCM_CCS} 75 | - name: AWS_ACCESS_KEY_ID 76 | value: ${AWS_ACCESS_KEY_ID} 77 | - name: AWS_SECRET_ACCESS_KEY 78 | value: ${AWS_SECRET_ACCESS_KEY} 79 | - name: CLOUD_PROVIDER_REGION 80 | value: ${CLOUD_PROVIDER_REGION} 81 | - name: 
GCP_CREDS_JSON 82 | value: ${GCP_CREDS_JSON} 83 | - name: LOG_BUCKET 84 | value: ${LOG_BUCKET} 85 | - name: USE_EXISTING_CLUSTER 86 | value: ${USE_EXISTING_CLUSTER} 87 | - name: CAD_PAGERDUTY_ROUTING_KEY 88 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 89 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/README.md: -------------------------------------------------------------------------------- 1 | # Conventions for Ginkgo based e2e tests 2 | 3 | - [Conventions for Ginkgo based e2e tests](#conventions-for-ginkgo-based-e2e-tests) 4 | - [Consuming](#consuming) 5 | - [`make` targets and functions.](#make-targets-and-functions) 6 | - [E2E Test](#e2e-test) 7 | - [Local Testing](#e2e-local-testing) 8 | 9 | ## Consuming 10 | Currently, this convention is only intended for OSD operators. To adopt this convention, your `boilerplate/update.cfg` should include: 11 | 12 | ``` 13 | openshift/golang-osd-e2e 14 | ``` 15 | 16 | ## `make` targets and functions. 
17 | 18 | **Note:** Your repository's main `Makefile` needs to be edited to include: 19 | 20 | ``` 21 | include boilerplate/generated-includes.mk 22 | ``` 23 | 24 | One of the primary purposes of these `make` targets is to allow you to 25 | standardize your prow and app-sre pipeline configurations using the 26 | following: 27 | 28 | ### E2e Test 29 | 30 | | `make` target | Purpose | 31 | |------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 32 | | `e2e-binary-build` | Compiles ginkgo tests under test/e2e and creates the ginkgo binary. | 33 | | `e2e-image-build-push` | Builds e2e image and pushes to operator's quay repo. Image name is defaulted to -test-harness. Quay repository must be created beforehand. 
| 34 | 35 | #### E2E Local Testing 36 | 37 | Please follow [this README](https://github.com/openshift/ops-sop/blob/master/v4/howto/osde2e/operator-test-harnesses.md#using-ginkgo) to run your e2e tests locally 38 | 39 | -------------------------------------------------------------------------------- /pkg/helpers/response_test.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "net/http" 8 | "testing" 9 | 10 | admissionapi "k8s.io/api/admission/v1" 11 | "k8s.io/apimachinery/pkg/types" 12 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 13 | ) 14 | 15 | func makeBuffer() *bytes.Buffer { 16 | return new(bytes.Buffer) 17 | } 18 | 19 | func formatOutput(s string) string { 20 | return fmt.Sprintf("%s\n", s) 21 | } 22 | 23 | func makeResponseObj(uid string, allowed bool, e error) *admissionctl.Response { 24 | if e == nil { 25 | return &admissionctl.Response{ 26 | AdmissionResponse: admissionapi.AdmissionResponse{ 27 | UID: types.UID(uid), 28 | Allowed: allowed, 29 | }, 30 | } 31 | } else { 32 | n := admissionctl.Errored(http.StatusBadRequest, e) 33 | return &n 34 | } 35 | } 36 | 37 | func TestBadResponse(t *testing.T) { 38 | t.Skip("Not quite sure how to test json encoding error") 39 | } 40 | 41 | func TestResponse(t *testing.T) { 42 | tests := []struct { 43 | allowed bool 44 | uid string 45 | e error 46 | status int32 47 | expectedResult string 48 | }{ 49 | { 50 | allowed: true, 51 | uid: "test-uid", 52 | e: nil, 53 | status: http.StatusOK, 54 | // the writer sends a newline 55 | expectedResult: formatOutput(`{"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1","response":{"uid":"test-uid","allowed":true,"auditAnnotations":{"owner":"srep-managed-webhook"}}}`), 56 | }, 57 | { 58 | allowed: false, 59 | uid: "test-fail-with-error", 60 | e: fmt.Errorf("request body is empty"), 61 | status: http.StatusBadRequest, 62 | expectedResult: 
formatOutput(`{"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1","response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"request body is empty","code":400},"auditAnnotations":{"owner":"srep-managed-webhook"}}}`), 63 | }, 64 | } 65 | for _, test := range tests { 66 | buf := makeBuffer() 67 | respObj := makeResponseObj(test.uid, test.allowed, test.e) 68 | SendResponse(buf, *respObj) 69 | if buf.String() != test.expectedResult { 70 | t.Fatalf("Expected to have `%s` but got `%s`", test.expectedResult, buf.String()) 71 | } 72 | decodedResult := &admissionapi.AdmissionReview{} 73 | err := json.Unmarshal([]byte(buf.String()), decodedResult) 74 | if err != nil { 75 | t.Errorf("Couldn't unmarshal the JSON blob: %s", err.Error()) 76 | } 77 | t.Logf("Response body = %s", buf.String()) 78 | 79 | if test.e != nil { 80 | if test.status == http.StatusOK { 81 | t.Errorf("It is weird to have an error result and a 200 OK. Check test's status field.") 82 | } 83 | // check for the Response.Result 84 | if decodedResult.Response.Result == nil { 85 | t.Fatalf("Error responses need a Response.Result, and this one didn't have one") 86 | } else { 87 | if decodedResult.Response.Result.Code != test.status { 88 | t.Fatalf("Expected HTTP status code of the Result to be %d, but got %d instead", test.status, decodedResult.Response.Result.Code) 89 | } 90 | } 91 | } 92 | 93 | } 94 | 95 | } 96 | -------------------------------------------------------------------------------- /boilerplate/_lib/freeze-check: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # NOTE: For security reasons, everything imported or invoked (even 4 | # indirectly) by this script should be audited for vulnerabilities and 5 | # explicitly excluded from `linguist-generated` in the consuming 6 | # repository's .gitattributes. 
In other words, we want PRs to show 7 | # deltas to this script and all its dependencies by default so that 8 | # attempts to inject or circumvent code are visible. 9 | 10 | set -e 11 | 12 | REPO_ROOT=$(git rev-parse --show-toplevel) 13 | # Hardcoded rather than sourced to reduce attack surface. 14 | BOILERPLATE_GIT_REPO=https://github.com/openshift/boilerplate.git 15 | 16 | # Validate that no subscribed boilerplate artifacts have been changed. 17 | # PR checks may wish to gate on this. 18 | 19 | # This works by grabbing the commit hash of the boilerplate repository 20 | # at which the last update was applied, running the main `update` driver 21 | # against that, and failing if there's a resulting diff. 22 | 23 | # If we can't tell what that commit was, we must assume this is the 24 | # first update, and we'll (noisily) "succeed". 25 | 26 | # Note that this ought to work when you've just committed an update, 27 | # even if you've changed your update.cfg beforehand. We're basically 28 | # making sure you didn't muck with anything after updating. 29 | 30 | # For this to work, you have to be starting from a clean repository 31 | # state (any changes committed). 32 | # TODO(efried): This is not ideal -- it would be nice if I could check 33 | # this before committing my changes -- but how would that work? Diff to 34 | # a file, create a temporary commit, run the rest, remove the commit, 35 | # and reapply the diff? Messy and error-prone -- and I would be 36 | # seriously ticked off if something went wrong and lost my in-flight 37 | # changes. 38 | if ! [ -z "$(git status --porcelain -- ':!build/Dockerfile*')" ]; then 39 | echo "Can't validate boilerplate in a dirty repository. Please commit your changes and try again." >&2 40 | exit 1 41 | fi 42 | 43 | # We glean the last boilerplate commit from the 44 | # last-boilerplate-commit file, which gets laid down by the main 45 | # `update` driver each time it runs. 
46 | LBCF=${REPO_ROOT}/boilerplate/_data/last-boilerplate-commit 47 | if ! [[ -f "$LBCF" ]]; then 48 | echo "Couldn't discover last boilerplate commit! Assuming you're bootstrapping." 49 | exit 0 50 | fi 51 | LBC=$(cat $LBCF) 52 | 53 | # Download just that commit 54 | echo "Fetching $LBC from $BOILERPLATE_GIT_REPO" 55 | # boilerplate/update cleans up this temp dir 56 | TMPD=$(mktemp -d) 57 | cd $TMPD 58 | git init 59 | # TODO(efried): DRY this remote. Make it configurable? 60 | git remote add origin $BOILERPLATE_GIT_REPO 61 | git fetch origin $(cat $LBCF) --tags 62 | git reset --hard FETCH_HEAD 63 | 64 | # Now invoke the update script, overriding the source repository we've 65 | # just downloaded at the appropriate commit. 66 | # We invoke the script explicitly rather than via the make target to 67 | # close a security hole whereby the latter is overridden. 68 | echo "Running update" 69 | cd $REPO_ROOT 70 | BOILERPLATE_GIT_REPO="${TMPD}" boilerplate/update 71 | 72 | # Okay, if anything has changed, that's bad. 73 | if [[ $(git status --porcelain -- ':!build/Dockerfile*' | wc -l) -ne 0 ]]; then 74 | echo "Your boilerplate is dirty!" >&2 75 | git status --porcelain -- ':!build/Dockerfile*' 76 | exit 1 77 | fi 78 | 79 | echo "Your boilerplate is clean!" 
80 | exit 0 81 | -------------------------------------------------------------------------------- /pkg/dispatcher/dispatcher.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/url" 7 | "sync" 8 | 9 | logf "sigs.k8s.io/controller-runtime/pkg/log" 10 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 11 | 12 | responsehelper "github.com/openshift/managed-cluster-validating-webhooks/pkg/helpers" 13 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks" 14 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 15 | ) 16 | 17 | var log = logf.Log.WithName("dispatcher") 18 | 19 | // Dispatcher struct 20 | type Dispatcher struct { 21 | hooks *map[string]webhooks.WebhookFactory // uri -> hookfactory 22 | mu sync.Mutex 23 | } 24 | 25 | // NewDispatcher new dispatcher 26 | func NewDispatcher(hooks webhooks.RegisteredWebhooks) *Dispatcher { 27 | hookMap := make(map[string]webhooks.WebhookFactory) 28 | for _, hook := range hooks { 29 | hookMap[hook().GetURI()] = hook 30 | } 31 | return &Dispatcher{ 32 | hooks: &hookMap, 33 | } 34 | } 35 | 36 | // HandleRequest http request 37 | // HTTP status code usage: When the request body is correctly parsed into a 38 | // request (utils.ParseHTTPRequest) then we should always send 200 OK and use 39 | // the response body (response.status.code) to indicate a problem. When instead 40 | // there's a problem with the HTTP request itself (404, an inability to parse a 41 | // request, or some internal problem) it is appropriate to use the HTTP status 42 | // code to communicate. 
43 | func (d *Dispatcher) HandleRequest(w http.ResponseWriter, r *http.Request) { 44 | d.mu.Lock() 45 | defer d.mu.Unlock() 46 | log.Info("Handling request", "request", r.RequestURI) 47 | url, err := url.Parse(r.RequestURI) 48 | if err != nil { 49 | w.WriteHeader(http.StatusBadRequest) 50 | log.Error(err, "Couldn't parse request %s", r.RequestURI) 51 | responsehelper.SendResponse(w, admissionctl.Errored(http.StatusBadRequest, err)) 52 | return 53 | } 54 | 55 | // is it one of ours? 56 | if hook, ok := (*d.hooks)[url.Path]; ok { 57 | // it's one of ours, so let's attempt to parse the request 58 | request, _, err := utils.ParseHTTPRequest(r) 59 | // Problem even parsing an AdmissionReview, so use HTTP status code 60 | if err != nil { 61 | w.WriteHeader(http.StatusBadRequest) 62 | log.Error(err, "Error parsing HTTP Request Body") 63 | responsehelper.SendResponse(w, admissionctl.Errored(http.StatusBadRequest, err)) 64 | return 65 | } 66 | // Valid AdmissionReview, but we can't do anything with it because we do not 67 | // think the request inside is valid. 68 | if !hook().Validate(request) { 69 | err = fmt.Errorf("not a valid webhook request") 70 | log.Error(err, "Error validaing HTTP Request Body") 71 | responsehelper.SendResponse(w, 72 | admissionctl.Errored(http.StatusBadRequest, err)) 73 | return 74 | } 75 | 76 | // Dispatch 77 | responsehelper.SendResponse(w, hook().Authorized(request)) 78 | return 79 | } 80 | log.Info("Request is not for a registered webhook.", "known_hooks", *d.hooks, "parsed_url", url, "lookup", (*d.hooks)[url.Path]) 81 | // Not a registered hook 82 | // Note: This segment is not likely to be reached because there will not be 83 | // any URI registered (handler set up) for an URI that would trigger this. 
84 | w.WriteHeader(404) 85 | responsehelper.SendResponse(w, 86 | admissionctl.Errored(http.StatusBadRequest, 87 | fmt.Errorf("request is not for a registered webhook"))) 88 | } 89 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/standard.mk: -------------------------------------------------------------------------------- 1 | # Validate variables in project.mk exist 2 | ifndef OPERATOR_NAME 3 | $(error OPERATOR_NAME is not set; only operators should consume this convention; check project.mk file) 4 | endif 5 | ifndef E2E_IMAGE_REGISTRY 6 | $(error E2E_IMAGE_REGISTRY is not set; check project.mk file) 7 | endif 8 | ifndef E2E_IMAGE_REPOSITORY 9 | $(error E2E_IMAGE_REPOSITORY is not set; check project.mk file) 10 | endif 11 | 12 | # Use current commit as e2e image tag 13 | CURRENT_COMMIT=$(shell git rev-parse --short=7 HEAD) 14 | E2E_IMAGE_TAG=$(CURRENT_COMMIT) 15 | 16 | ### Accommodate docker or podman 17 | # 18 | # The docker/podman creds cache needs to be in a location unique to this 19 | # invocation; otherwise it could collide across jenkins jobs. We'll use 20 | # a .docker folder relative to pwd (the repo root). 21 | CONTAINER_ENGINE_CONFIG_DIR = .docker 22 | JENKINS_DOCKER_CONFIG_FILE = /var/lib/jenkins/.docker/config.json 23 | export REGISTRY_AUTH_FILE = ${CONTAINER_ENGINE_CONFIG_DIR}/config.json 24 | 25 | # If this configuration file doesn't exist, podman will error out. So 26 | # we'll create it if it doesn't exist. 
27 | ifeq (,$(wildcard $(REGISTRY_AUTH_FILE))) 28 | $(shell mkdir -p $(CONTAINER_ENGINE_CONFIG_DIR)) 29 | # Copy the node container auth file so that we get access to the registries the 30 | # parent node has access to 31 | $(shell if test -f $(JENKINS_DOCKER_CONFIG_FILE); then cp $(JENKINS_DOCKER_CONFIG_FILE) $(REGISTRY_AUTH_FILE); fi) 32 | endif 33 | 34 | # ==> Docker uses --config=PATH *before* (any) subcommand; so we'll glue 35 | # that to the CONTAINER_ENGINE variable itself. (NOTE: I tried half a 36 | # dozen other ways to do this. This was the least ugly one that actually 37 | # works.) 38 | ifndef CONTAINER_ENGINE 39 | CONTAINER_ENGINE=$(shell command -v podman 2>/dev/null || echo docker --config=$(CONTAINER_ENGINE_CONFIG_DIR)) 40 | endif 41 | 42 | REGISTRY_USER ?= 43 | REGISTRY_TOKEN ?= 44 | 45 | # TODO: Figure out how to discover this dynamically 46 | OSDE2E_CONVENTION_DIR := boilerplate/openshift/golang-osd-operator-osde2e 47 | 48 | # log into quay.io 49 | .PHONY: container-engine-login 50 | container-engine-login: 51 | @test "${REGISTRY_USER}" != "" && test "${REGISTRY_TOKEN}" != "" || (echo "REGISTRY_USER and REGISTRY_TOKEN must be defined" && exit 1) 52 | mkdir -p ${CONTAINER_ENGINE_CONFIG_DIR} 53 | @${CONTAINER_ENGINE} login -u="${REGISTRY_USER}" -p="${REGISTRY_TOKEN}" quay.io 54 | 55 | ###################### 56 | # Targets used by e2e test suite 57 | ###################### 58 | 59 | # create binary 60 | .PHONY: e2e-binary-build 61 | e2e-binary-build: GOFLAGS_MOD=-mod=mod 62 | e2e-binary-build: GOENV=GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 GOFLAGS="${GOFLAGS_MOD}" 63 | e2e-binary-build: 64 | go mod tidy 65 | go test ./test/e2e -v -c --tags=osde2e -o e2e.test 66 | 67 | # push e2e image tagged as latest and as repo commit hash 68 | .PHONY: e2e-image-build-push 69 | e2e-image-build-push: container-engine-login 70 | ${CONTAINER_ENGINE} build --pull -f test/e2e/Dockerfile -t 
$(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) . 71 | ${CONTAINER_ENGINE} tag $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 72 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) 73 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 74 | -------------------------------------------------------------------------------- /boilerplate/_lib/subscriber-propose-update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ROOT=$(git rev-parse --show-toplevel) 4 | source $REPO_ROOT/boilerplate/_lib/common.sh 5 | 6 | usage() { 7 | cat < $TMPD/$f 46 | echo $TMPD/$f 47 | return 48 | fi 49 | done 50 | } 51 | 52 | ## expected_prow_config ORG PROJ BRANCH 53 | # 54 | # Prints to stdout the expected prow configuration for the specified 55 | # ORG/PROJ. 56 | expected_prow_config() { 57 | local org=$1 58 | local consumer_name=$2 59 | local branch=$3 60 | # TODO: DRY this with what's in prow-config. 61 | # Do it by making it a template in the convention dir. 62 | cat < /(path-to)/kubeconfig 101 | 102 | 5. 
Run test suite using 103 | 104 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 105 | EOF 106 | 107 | sed -e "s/\${OPERATOR_NAME}/${OPERATOR_NAME}/" $(dirname $0)/e2e-template.yml >"${E2E_SUITE_DIRECTORY}/e2e-template.yml" 108 | 109 | # todo: remove after file is renamed in ALL consumer repos 110 | if [ -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" ]; then 111 | rm -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" 112 | fi 113 | -------------------------------------------------------------------------------- /pkg/config/namespaces.go: -------------------------------------------------------------------------------- 1 | // Code generated by pkg/config/generate/namespaces.go; DO NOT EDIT. 2 | // Generated at 2025-09-01 07:11:23.45239 +0000 UTC 3 | package config 4 | 5 | var ConfigMapSources = []string{ 6 | "openshift-monitoring/managed-namespaces", 7 | "openshift-monitoring/ocp-namespaces", 8 | } 9 | 10 | var PrivilegedNamespaces = []string{ 11 | "^default$", 12 | "^openshift$", 13 | "^kube-.*", 14 | "^redhat-.*", 15 | "^dedicated-admin$", 16 | "^openshift-addon-operator$", 17 | "^openshift-aqua$", 18 | "^openshift-aws-vpce-operator$", 19 | "^openshift-backplane$", 20 | "^openshift-backplane-cee$", 21 | "^openshift-backplane-csa$", 22 | "^openshift-backplane-cse$", 23 | "^openshift-backplane-csm$", 24 | "^openshift-backplane-managed-scripts$", 25 | "^openshift-backplane-mobb$", 26 | "^openshift-backplane-srep$", 27 | "^openshift-backplane-tam$", 28 | "^openshift-cloud-ingress-operator$", 29 | "^openshift-codeready-workspaces$", 30 | "^openshift-compliance$", 31 | "^openshift-compliance-monkey$", 32 | "^openshift-container-security$", 33 | "^openshift-custom-domains-operator$", 34 | "^openshift-customer-monitoring$", 35 | "^openshift-deployment-validation-operator$", 36 | "^openshift-managed-node-metadata-operator$", 37 | "^openshift-file-integrity$", 38 | "^openshift-logging$", 39 | 
"^openshift-managed-upgrade-operator$", 40 | "^openshift-must-gather-operator$", 41 | "^openshift-observability-operator$", 42 | "^openshift-ocm-agent-operator$", 43 | "^openshift-operators-redhat$", 44 | "^openshift-osd-metrics$", 45 | "^openshift-rbac-permissions$", 46 | "^openshift-route-monitor-operator$", 47 | "^openshift-scanning$", 48 | "^openshift-security$", 49 | "^openshift-splunk-forwarder-operator$", 50 | "^openshift-sre-pruning$", 51 | "^openshift-suricata$", 52 | "^openshift-validation-webhook$", 53 | "^openshift-velero$", 54 | "^openshift-monitoring$", 55 | "^openshift$", 56 | "^openshift-cluster-version$", 57 | "^goalert$", 58 | "^keycloak$", 59 | "^configure-goalert-operator$", 60 | "^kube-system$", 61 | "^openshift-apiserver$", 62 | "^openshift-apiserver-operator$", 63 | "^openshift-authentication$", 64 | "^openshift-authentication-operator$", 65 | "^openshift-cloud-controller-manager$", 66 | "^openshift-cloud-controller-manager-operator$", 67 | "^openshift-cloud-credential-operator$", 68 | "^openshift-cloud-network-config-controller$", 69 | "^openshift-cluster-api$", 70 | "^openshift-cluster-csi-drivers$", 71 | "^openshift-cluster-machine-approver$", 72 | "^openshift-cluster-node-tuning-operator$", 73 | "^openshift-cluster-samples-operator$", 74 | "^openshift-cluster-storage-operator$", 75 | "^openshift-config$", 76 | "^openshift-config-managed$", 77 | "^openshift-config-operator$", 78 | "^openshift-console$", 79 | "^openshift-console-operator$", 80 | "^openshift-console-user-settings$", 81 | "^openshift-controller-manager$", 82 | "^openshift-controller-manager-operator$", 83 | "^openshift-dns$", 84 | "^openshift-dns-operator$", 85 | "^openshift-etcd$", 86 | "^openshift-etcd-operator$", 87 | "^openshift-host-network$", 88 | "^openshift-image-registry$", 89 | "^openshift-ingress$", 90 | "^openshift-ingress-canary$", 91 | "^openshift-ingress-operator$", 92 | "^openshift-insights$", 93 | "^openshift-kni-infra$", 94 | "^openshift-kube-apiserver$", 95 
| "^openshift-kube-apiserver-operator$", 96 | "^openshift-kube-controller-manager$", 97 | "^openshift-kube-controller-manager-operator$", 98 | "^openshift-kube-scheduler$", 99 | "^openshift-kube-scheduler-operator$", 100 | "^openshift-kube-storage-version-migrator$", 101 | "^openshift-kube-storage-version-migrator-operator$", 102 | "^openshift-machine-api$", 103 | "^openshift-machine-config-operator$", 104 | "^openshift-marketplace$", 105 | "^openshift-monitoring$", 106 | "^openshift-multus$", 107 | "^openshift-network-diagnostics$", 108 | "^openshift-network-operator$", 109 | "^openshift-nutanix-infra$", 110 | "^openshift-oauth-apiserver$", 111 | "^openshift-openstack-infra$", 112 | "^openshift-operator-lifecycle-manager$", 113 | "^openshift-operators$", 114 | "^openshift-ovirt-infra$", 115 | "^openshift-sdn$", 116 | "^openshift-ovn-kubernetes$", 117 | "^openshift-platform-operators$", 118 | "^openshift-route-controller-manager$", 119 | "^openshift-service-ca$", 120 | "^openshift-service-ca-operator$", 121 | "^openshift-user-workload-monitoring$", 122 | "^openshift-vsphere-infra$", 123 | } 124 | -------------------------------------------------------------------------------- /pkg/webhooks/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "regexp" 9 | "slices" 10 | 11 | admissionv1 "k8s.io/api/admission/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | "k8s.io/apimachinery/pkg/runtime/serializer" 15 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 16 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 17 | ) 18 | 19 | const ( 20 | validContentType string = "application/json" 21 | // PrivilegedServiceAccountGroups is a regex string of serviceaccounts that our webhooks should commonly allow to 22 | // perform restricted actions. 
23 | // Centralized osde2e tests have a serviceaccount like "system:serviceaccounts:osde2e-abcde" 24 | // Decentralized osde2e tests have a serviceaccount like "system:serviceaccounts:osde2e-h-abcde" 25 | PrivilegedServiceAccountGroups string = `^system:serviceaccounts:(kube-.*|openshift|openshift-.*|default|redhat-.*|osde2e-(h-)?[a-z0-9]{5})` 26 | ) 27 | 28 | var ( 29 | admissionScheme = runtime.NewScheme() 30 | admissionCodecs = serializer.NewCodecFactory(admissionScheme) 31 | ) 32 | 33 | func RequestMatchesGroupKind(req admissionctl.Request, kind, group string) bool { 34 | return req.Kind.Kind == kind && req.Kind.Group == group 35 | } 36 | 37 | func DefaultLabelSelector() metav1.LabelSelector { 38 | return metav1.LabelSelector{ 39 | MatchLabels: map[string]string{ 40 | "api.openshift.com/managed": "true", 41 | }, 42 | } 43 | } 44 | 45 | func IsProtectedByResourceName(name string) bool { 46 | protectedNames := []string{ 47 | "alertmanagerconfigs.monitoring.coreos.com", 48 | "alertmanagers.monitoring.coreos.com", 49 | "prometheuses.monitoring.coreos.com", 50 | "thanosrulers.monitoring.coreos.com", 51 | "podmonitors.monitoring.coreos.com", 52 | "probes.monitoring.coreos.com", 53 | "prometheusrules.monitoring.coreos.com", 54 | "servicemonitors.monitoring.coreos.com", 55 | "prometheusagents.monitoring.coreos.com", 56 | "scrapeconfigs.monitoring.coreos.com", 57 | } 58 | return slices.Contains(protectedNames, name) 59 | } 60 | 61 | func RegexSliceContains(needle string, haystack []string) bool { 62 | for _, check := range haystack { 63 | checkRe := regexp.MustCompile(check) 64 | if checkRe.Match([]byte(needle)) { 65 | return true 66 | } 67 | } 68 | return false 69 | } 70 | 71 | func ParseHTTPRequest(r *http.Request) (admissionctl.Request, admissionctl.Response, error) { 72 | var resp admissionctl.Response 73 | var req admissionctl.Request 74 | var err error 75 | var body []byte 76 | if r.Body != nil { 77 | if body, err = io.ReadAll(r.Body); err != nil { 78 | resp = 
admissionctl.Errored(http.StatusBadRequest, err) 79 | return req, resp, err 80 | } 81 | } else { 82 | err := errors.New("request body is nil") 83 | resp = admissionctl.Errored(http.StatusBadRequest, err) 84 | return req, resp, err 85 | } 86 | if len(body) == 0 { 87 | err := errors.New("request body is empty") 88 | resp = admissionctl.Errored(http.StatusBadRequest, err) 89 | return req, resp, err 90 | } 91 | contentType := r.Header.Get("Content-Type") 92 | if contentType != validContentType { 93 | err := fmt.Errorf("contentType=%s, expected application/json", contentType) 94 | resp = admissionctl.Errored(http.StatusBadRequest, err) 95 | return req, resp, err 96 | } 97 | ar := admissionv1.AdmissionReview{} 98 | if _, _, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar); err != nil { 99 | resp = admissionctl.Errored(http.StatusBadRequest, err) 100 | return req, resp, err 101 | } 102 | 103 | // Copy for tracking 104 | if ar.Request == nil { 105 | err = errors.New("no request in request body") 106 | resp = admissionctl.Errored(http.StatusBadRequest, err) 107 | return req, resp, err 108 | } 109 | resp.UID = ar.Request.UID 110 | req = admissionctl.Request{ 111 | AdmissionRequest: *ar.Request, 112 | } 113 | return req, resp, nil 114 | } 115 | 116 | // WebhookResponse assembles an allowed or denied admission response with the same UID as the provided request.
117 | // The reason for allowed admission responses is not shown to the end user and is commonly empty string: "" 118 | func WebhookResponse(request admissionctl.Request, allowed bool, reason string) admissionctl.Response { 119 | resp := admissionctl.ValidationResponse(allowed, reason) 120 | resp.UID = request.UID 121 | return resp 122 | } 123 | 124 | func init() { 125 | utilruntime.Must(admissionv1.AddToScheme(admissionScheme)) 126 | } 127 | -------------------------------------------------------------------------------- /pkg/testutils/testutils.go: -------------------------------------------------------------------------------- 1 | package testutils 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "net/http" 7 | "net/http/httptest" 8 | 9 | admissionv1 "k8s.io/api/admission/v1" 10 | authenticationv1 "k8s.io/api/authentication/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/types" 14 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | 16 | responsehelper "github.com/openshift/managed-cluster-validating-webhooks/pkg/helpers" 17 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 18 | ) 19 | 20 | // Webhook interface 21 | type Webhook interface { 22 | // Authorized will determine if the request is allowed 23 | Authorized(request admissionctl.Request) admissionctl.Response 24 | } 25 | 26 | // CanCanNot helper to make English a bit nicer 27 | func CanCanNot(b bool) string { 28 | if b { 29 | return "can" 30 | } 31 | return "can not" 32 | } 33 | 34 | // CreateFakeRequestJSON will render the []byte slice needed for the (fake) HTTP request. 
35 | // Inputs into this are the request UID, which GVK and GVR are being gated by this webhook, 36 | // User information (username and groups), what kind of operation is being gated by this webhook 37 | // and finally the runtime.RawExtension representation of the request's Object or OldObject 38 | // The Object/OldObject is automatically inferred by the operation; delete operations will force OldObject 39 | // To create the RawExtension: 40 | // 41 | // obj := runtime.RawExtension{ 42 | // Raw: []byte(rawObjString), 43 | // } 44 | // 45 | // where rawObjString is a literal JSON blob, eg: 46 | // 47 | // { 48 | // "metadata": { 49 | // "name": "namespace-name", 50 | // "uid": "request-userid", 51 | // "creationTimestamp": "2020-05-10T07:51:00Z" 52 | // }, 53 | // "users": null 54 | // } 55 | func CreateFakeRequestJSON(uid string, 56 | gvk metav1.GroupVersionKind, gvr metav1.GroupVersionResource, 57 | operation admissionv1.Operation, 58 | username string, userGroups []string, namespace string, 59 | obj, oldObject *runtime.RawExtension) ([]byte, error) { 60 | 61 | req := admissionv1.AdmissionReview{ 62 | Request: &admissionv1.AdmissionRequest{ 63 | UID: types.UID(uid), 64 | Kind: gvk, 65 | RequestKind: &gvk, 66 | Resource: gvr, 67 | Operation: operation, 68 | Namespace: namespace, 69 | UserInfo: authenticationv1.UserInfo{ 70 | Username: username, 71 | Groups: userGroups, 72 | }, 73 | }, 74 | } 75 | switch operation { 76 | case admissionv1.Create: 77 | req.Request.Object = *obj 78 | case admissionv1.Update: 79 | // TODO (lisa): Update should have a different object for Object than for OldObject 80 | req.Request.Object = *obj 81 | if oldObject != nil { 82 | req.Request.OldObject = *oldObject 83 | } else { 84 | req.Request.OldObject = *obj 85 | } 86 | case admissionv1.Delete: 87 | req.Request.OldObject = *obj 88 | } 89 | b, err := json.Marshal(req) 90 | if err != nil { 91 | return []byte{}, err 92 | } 93 | return b, nil 94 | } 95 | 96 | // CreateHTTPRequest takes all 
the information needed for an AdmissionReview. 97 | // See also CreateFakeRequestJSON for more. 98 | func CreateHTTPRequest(uri, uid string, 99 | gvk metav1.GroupVersionKind, gvr metav1.GroupVersionResource, 100 | operation admissionv1.Operation, 101 | username string, userGroups []string, namespace string, 102 | obj, oldObject *runtime.RawExtension) (*http.Request, error) { 103 | req, err := CreateFakeRequestJSON(uid, gvk, gvr, operation, username, userGroups, namespace, obj, oldObject) 104 | if err != nil { 105 | return nil, err 106 | } 107 | buf := bytes.NewBuffer(req) 108 | httprequest := httptest.NewRequest("POST", uri, buf) 109 | httprequest.Header["Content-Type"] = []string{"application/json"} 110 | return httprequest, nil 111 | } 112 | 113 | // SendHTTPRequest will send the fake request to be handled by the Webhook 114 | func SendHTTPRequest(req *http.Request, s Webhook) (*admissionv1.AdmissionResponse, error) { 115 | httpResponse := httptest.NewRecorder() 116 | request, _, err := utils.ParseHTTPRequest(req) 117 | if err != nil { 118 | return nil, err 119 | } 120 | resp := s.Authorized(request) 121 | responsehelper.SendResponse(httpResponse, resp) 122 | // at this point, httpResponse should contain the data sent in response to the webhook query, which is the success/fail 123 | ret := &admissionv1.AdmissionReview{} 124 | err = json.Unmarshal(httpResponse.Body.Bytes(), ret) 125 | if err != nil { 126 | return nil, err 127 | } 128 | return ret.Response, nil 129 | } 130 | -------------------------------------------------------------------------------- /docs/hypershift.md: -------------------------------------------------------------------------------- 1 | # managed-cluster-validating-webhooks on Hypershift 2 | 3 | ## How it works 4 | 5 | Managed Cluster Validating Webhooks (MCVW) is deployed into Hypershift environments via several different components.
6 | 7 | - The webhook admission service is deployed into each hosted control plane (HCP) namespace on Hypershift management clusters, via [package-operator](https://package-operator.run/) 8 | - The `ValidatingWebhookConfiguration` resources are deployed directly onto Hypershift hosted clusters. 9 | 10 | The above components are both installed via a [package operator](https://package-operator.run/) (PKO) package. The package is distributed to Hypershift Management Clusters via an Advanced Cluster Management policy. These resources will be discussed in the section below. 11 | 12 | ## Package Operator package 13 | 14 | The PKO package consists of: 15 | - [a manifest](../config/package/manifest.yaml) which lists the phases involved in the package installation, any availability and promotion tests. 16 | - [a resource bundle](../config/package/resources.yaml.gotmpl) which contains all the resources needed for MCVW to run in the HCP namespace, as well as the ValidatingWebhookConfigurations installed on the hosted cluster. This bundle is dynamically generated by [resources.go](../build/resources.go). Each resource is annotated with a phase so that PKO knows during which phase the resource should be installed. 17 | - [a Containerfile](../config/package/managed-cluster-validating-webhooks-package.Containerfile) which builds the PKO package image. 18 | 19 | ### Building a package 20 | 21 | You can manually rebuild or generate the resource bundle by running: 22 | 23 | ```bash 24 | make package 25 | ``` 26 | 27 | You can manually build the PKO package image by running: 28 | ```bash 29 | make IMG_ORG= build-package-image 30 | ``` 31 | 32 | Note that the resulting package image will follow the naming convention `quay.io/$USER/managed-cluster-validating-webhooks-hs-package` 33 | and can be pushed to Quay for testing if needed. 
34 | 35 | ### Testing a package 36 | 37 | Once a package has been built (and pushed to a public image repository) it can be manually installed on a PKO-running cluster by creating a simple `Package` spec: 38 | 39 | ```yaml 40 | apiVersion: package-operator.run/v1alpha1 41 | kind: Package 42 | metadata: 43 | name: validation-webhook 44 | namespace: validation-webhook 45 | spec: 46 | image: quay.io/$USER/managed-cluster-validating-webhooks-hs-package:$TAG 47 | ``` 48 | 49 | ## ACM Policy for Package distribution 50 | 51 | On Hypershift, the `Package` resource is distributed to all HCP Namespaces via a [SelectorSyncSet](../hack/templates/00-managed-cluster-validating-webhooks-hs.SelectorSyncSet.yaml.tmpl) containing ACM Policy. 52 | 53 | The application of the SelectorSyncSet to Hive clusters (in turn distributing it to the Hypershift service clusters) is performed by [app-interface](https://gitlab.cee.redhat.com/service/app-interface/-/blob/master/data/services/osd-operators/cicd/saas/saas-managed-cluster-validating-webhooks.yaml). 54 | 55 | ## How the CI/CD process works 56 | 57 | This section describes the main steps that enable a CI/CD flow for `managed-cluster-validating-webhooks`: 58 | 59 | - A new commit is merged to the MCVW repository. 60 | - This [triggers app-interface](https://gitlab.cee.redhat.com/service/app-interface/-/blob/master/data/services/osd-operators/cicd/ci-int/jobs-managed-cluster-validating-webhooks.yaml) to call the MCVW [build_deploy.sh](https://github.com/openshift/managed-cluster-validating-webhooks/blob/master/build/build_deploy.sh) script. 61 | - The `build_deploy.sh` script builds a new MCVW image and a new PKO package. Each are tagged with the same git short hash representing the commit that was just merged. 
62 | - The `managed-cluster-validating-webhooks-hypershift` SaaS [resource template in app-interface](https://gitlab.cee.redhat.com/service/app-interface/-/blob/master/data/services/osd-operators/cicd/saas/saas-managed-cluster-validating-webhooks.yaml) will roll out the latest templated [SelectorSyncSet](https://github.com/openshift/managed-cluster-validating-webhooks/blob/master/hack/templates/00-managed-cluster-validating-webhooks-hs.SelectorSyncSet.yaml.tmpl) to staging/integration Hive shards. The `IMAGE_DIGEST` value will be replaced by the git short hash of the latest commit; therefore, the PKO image referenced will be the one built by the earlier step. 63 | - Because the ACM Policy has changed, the Policy will be updated on all Hypershift Management Clusters. This will result in the `Package` resource updating in every HCP Namespace to reference the new PKO image. 64 | - PKO will download that PKO image and install or update the resources contained within. -------------------------------------------------------------------------------- /boilerplate/_lib/release.sh: -------------------------------------------------------------------------------- 1 | # Helpers and variables for dealing with openshift/release 2 | 3 | # NOTE: This library is sourced from user-run scripts. It should not be 4 | # sourced in CI, as it relies on git config that's not necessarily 5 | # present there. 6 | 7 | RELEASE_REPO=openshift/release 8 | 9 | ## Information about the boilerplate consumer 10 | # E.g. "openshift/my-wizbang-operator" 11 | CONSUMER=$(repo_name .) 12 | [[ -z "$CONSUMER" ]] && err " 13 | Failed to determine current repository name" 14 | # 15 | # E.g. "openshift" 16 | CONSUMER_ORG=${CONSUMER%/*} 17 | [[ -z "$CONSUMER_ORG" ]] && err " 18 | Failed to determine consumer org" 19 | # 20 | # E.g. "my-wizbang-operator" 21 | CONSUMER_NAME=${CONSUMER#*/} 22 | [[ -z "$CONSUMER_NAME" ]] && err " 23 | Failed to determine consumer name" 24 | # 25 | # E.g. 
"master" 26 | # This will produce something like refs/remotes/origin/master 27 | DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/upstream/HEAD 2>/dev/null || git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null || echo defaulting/to/master) 28 | # Strip off refs/remotes/{upstream|origin}/ 29 | DEFAULT_BRANCH=${DEFAULT_BRANCH##*/} 30 | [[ -z "$DEFAULT_BRANCH" ]] && err " 31 | Failed to determine default branch name" 32 | 33 | ## release_process_args "$@" 34 | # 35 | # This is for use by commands expecting one optional argument which is 36 | # the file system path to a clone of the $RELEASE_REPO. 37 | # 38 | # Will invoke `usage` -- which must be defined by the caller -- if 39 | # the wrong number of arguments are received, or if the single argument 40 | # is `help` or a flag. 41 | # 42 | # If exactly one argument is specified and it is valid, it is assigned 43 | # to the global RELEASE_CLONE variable. 44 | release_process_args() { 45 | if [[ $# -eq 1 ]]; then 46 | # Special cases for usage queries 47 | if [[ "$1" == '-'* ]] || [[ "$1" == help ]]; then 48 | usage 49 | fi 50 | 51 | [[ -d $1 ]] || err " 52 | $1: Not a directory." 53 | 54 | [[ $(repo_name $1) == "$RELEASE_REPO" ]] || err " 55 | $1 is not a clone of $RELEASE_REPO; or its 'origin' remote is not set properly." 56 | 57 | # Got a usable clone of openshift/release 58 | RELEASE_CLONE="$1" 59 | 60 | elif [[ $# -ne 0 ]]; then 61 | usage 62 | fi 63 | } 64 | 65 | ## release_validate_invocation 66 | # 67 | # Make sure we were called from a reasonable place, that being: 68 | # - A boilerplate consumer 69 | # - ...that's actually subscribed to a convention 70 | # - ...containing the script being invoked 71 | release_validate_invocation() { 72 | # Make sure we were invoked from a boilerplate consumer. 73 | [[ -z "$CONVENTION_NAME" ]] && err " 74 | $cmd must be invoked from a consumer of an appropriate convention. Where did you get this script from?" 
75 | # Or at least not from boilerplate itself 76 | [[ "$CONSUMER" == "openshift/boilerplate" ]] && err " 77 | $cmd must be invoked from a boilerplate consumer, not from boilerplate itself." 78 | 79 | [[ -s $CONVENTION_ROOT/_data/last-boilerplate-commit ]] || err " 80 | $cmd must be invoked from a boilerplate consumer!" 81 | 82 | grep -E -q "^$CONVENTION_NAME(\s.*)?$" $CONVENTION_ROOT/update.cfg || err " 83 | $CONSUMER is not subscribed to $CONVENTION_NAME!" 84 | } 85 | 86 | ## release_prep_clone 87 | # 88 | # If $RELEASE_CLONE is already set: 89 | # - It should represent a directory containing a clean checkout of the 90 | # release repository; otherwise we error. 91 | # - We checkout and pull master. 92 | # Otherwise: 93 | # - We clone the release repo to a temporary directory. 94 | # - We set the $RELEASE_CLONE global variable to point to that 95 | # directory. 96 | release_prep_clone() { 97 | # If a release repo clone wasn't specified, create one 98 | if [[ -z "$RELEASE_CLONE" ]]; then 99 | RELEASE_CLONE=$(mktemp -dt openshift_release_XXXXXXX) 100 | git clone --depth=1 git@github.com:${RELEASE_REPO}.git $RELEASE_CLONE 101 | else 102 | [[ -z "$(git -C $RELEASE_CLONE status --porcelain)" ]] || err " 103 | Your release clone must start clean." 104 | # These will blow up if it's misconfigured 105 | git -C $RELEASE_CLONE checkout master 106 | git -C $RELEASE_CLONE pull 107 | fi 108 | } 109 | 110 | ## release_done_msg BRANCH 111 | # 112 | # Print exit instructions for submitting the release PR. 113 | # BRANCH is a suggested branch name. 
114 | release_done_msg() { 115 | echo 116 | git status 117 | 118 | cat <&2 146 | continue 147 | fi 148 | to_process[$a]=1 149 | done 150 | 151 | for subscriber in "${!to_process[@]}"; do 152 | [[ "${to_process[$subscriber]}" -eq 1 ]] || continue 153 | echo -n "${subscriber} " 154 | done 155 | } 156 | -------------------------------------------------------------------------------- /docs/webhooks-short.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "webhookName": "clusterlogging-validation", 4 | "documentString": "Managed OpenShift Customers may set log retention outside the allowed range of 0-7 days" 5 | }, 6 | { 7 | "webhookName": "clusterrolebindings-validation", 8 | "documentString": "Managed OpenShift Customers may not delete the cluster role bindings under the managed namespaces: (^openshift-.*|kube-system)" 9 | }, 10 | { 11 | "webhookName": "clusterroles-validation", 12 | "documentString": "Managed OpenShift Customers may not delete protected ClusterRoles including cluster-admin, view, edit, admin, specific system roles (system:admin, system:node, system:node-proxier, system:kube-scheduler, system:kube-controller-manager), and backplane-* roles" 13 | }, 14 | { 15 | "webhookName": "customresourcedefinitions-validation", 16 | "documentString": "Managed OpenShift Customers may not change CustomResourceDefinitions managed by Red Hat." 17 | }, 18 | { 19 | "webhookName": "hiveownership-validation", 20 | "documentString": "Managed OpenShift customers may not edit certain managed resources. A managed resource has a \"hive.openshift.io/managed\": \"true\" label." 21 | }, 22 | { 23 | "webhookName": "imagecontentpolicies-validation", 24 | "documentString": "Managed OpenShift customers may not create ImageContentSourcePolicy, ImageDigestMirrorSet, or ImageTagMirrorSet resources that configure mirrors that would conflict with system registries (e.g. quay.io, registry.redhat.io, registry.access.redhat.com, etc). 
For more details, see https://docs.openshift.com/" 25 | }, 26 | { 27 | "webhookName": "ingress-config-validation", 28 | "documentString": "Managed OpenShift customers may not modify ingress config resources because it can degrade cluster operators and can interfere with OpenShift SRE monitoring." 29 | }, 30 | { 31 | "webhookName": "ingresscontroller-validation", 32 | "documentString": "Managed OpenShift Customer may create IngressControllers without necessary taints. This can cause those workloads to be provisioned on master nodes." 33 | }, 34 | { 35 | "webhookName": "namespace-validation", 36 | "documentString": "Managed OpenShift Customers may not modify namespaces specified in the [openshift-monitoring/managed-namespaces openshift-monitoring/ocp-namespaces] ConfigMaps because customer workloads should be placed in customer-created namespaces. Customers may not create namespaces identified by this regular expression (^com$|^io$|^in$) because it could interfere with critical DNS resolution. Additionally, customers may not set or change the values of these Namespace labels [managed.openshift.io/storage-pv-quota-exempt managed.openshift.io/service-lb-quota-exempt]." 37 | }, 38 | { 39 | "webhookName": "networkpolicies-validation", 40 | "documentString": "Managed OpenShift Customers may not create NetworkPolicies in namespaces managed by Red Hat." 41 | }, 42 | { 43 | "webhookName": "node-validation-osd", 44 | "documentString": "Managed OpenShift customers may not alter Node objects." 45 | }, 46 | { 47 | "webhookName": "pod-validation", 48 | "documentString": "Managed OpenShift Customers may use tolerations on Pods that could cause those Pods to be scheduled on infra or master nodes." 49 | }, 50 | { 51 | "webhookName": "podimagespec-mutation", 52 | "documentString": "OpenShift debugging tools on Managed OpenShift clusters must be available even if internal image registry is removed."
53 | }, 54 | { 55 | "webhookName": "prometheusrule-validation", 56 | "documentString": "Managed OpenShift Customers may not create PrometheusRule in namespaces managed by Red Hat." 57 | }, 58 | { 59 | "webhookName": "regular-user-validation", 60 | "documentString": "Managed OpenShift customers may not manage any objects in the following APIGroups [upgrade.managed.openshift.io config.openshift.io operator.openshift.io network.openshift.io admissionregistration.k8s.io addons.managed.openshift.io cloudingress.managed.openshift.io managed.openshift.io splunkforwarder.managed.openshift.io autoscaling.openshift.io machineconfiguration.openshift.io cloudcredential.openshift.io machine.openshift.io ocmagent.managed.openshift.io], nor may Managed OpenShift customers alter the APIServer, KubeAPIServer, OpenShiftAPIServer, ClusterVersion, Proxy or SubjectPermission objects." 61 | }, 62 | { 63 | "webhookName": "scc-validation", 64 | "documentString": "Managed OpenShift Customers may not modify the following default SCCs: [anyuid hostaccess hostmount-anyuid hostnetwork hostnetwork-v2 node-exporter nonroot nonroot-v2 privileged restricted restricted-v2]" 65 | }, 66 | { 67 | "webhookName": "sdn-migration-validation", 68 | "documentString": "Managed OpenShift customers may not modify the network config type because it can degrade cluster operators and can interfere with OpenShift SRE monitoring." 69 | }, 70 | { 71 | "webhookName": "service-mutation", 72 | "documentString": "LoadBalancer-type services on Managed OpenShift clusters must contain an additional annotation for managed policy compliance."
73 | }, 74 | { 75 | "webhookName": "serviceaccount-validation", 76 | "documentString": "Managed OpenShift Customers may not delete the service accounts under the managed namespaces." 77 | }, 78 | { 79 | "webhookName": "techpreviewnoupgrade-validation", 80 | "documentString": "Managed OpenShift Customers may not use TechPreviewNoUpgrade FeatureGate that could prevent any future ability to do a y-stream upgrade to their clusters." 81 | } 82 | ] 83 | -------------------------------------------------------------------------------- /pkg/webhooks/clusterrole/clusterrole_test.go: -------------------------------------------------------------------------------- 1 | package clusterrole 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 7 | admissionv1 "k8s.io/api/admission/v1" 8 | authenticationv1 "k8s.io/api/authentication/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 12 | ) 13 | 14 | type ClusterRoleTestSuites struct { 15 | testID string 16 | targetClusterRole string 17 | username string 18 | operation admissionv1.Operation 19 | userGroups []string 20 | shouldBeAllowed bool 21 | } 22 | 23 | var ( 24 | // testClusterRoleJSON represents a minimal ClusterRole JSON object for testing 25 | testClusterRoleJSON = `{ 26 | "apiVersion": "rbac.authorization.k8s.io/v1", 27 | "kind": "ClusterRole", 28 | "metadata": { 29 | "name": "cluster-admin" 30 | }, 31 | "rules": [ 32 | { 33 | "apiGroups": ["*"], 34 | "resources": ["*"], 35 | "verbs": ["*"] 36 | } 37 | ] 38 | }` 39 | 40 | // testOtherClusterRoleJSON represents a non-protected ClusterRole 41 | testOtherClusterRoleJSON = `{ 42 | "apiVersion": "rbac.authorization.k8s.io/v1", 43 | "kind": "ClusterRole", 44 | "metadata": { 45 | "name": "some-custom-role" 46 | }, 47 | "rules": [ 48 | { 49 | "apiGroups": [""], 50 | "resources": ["pods"], 51 | "verbs": ["get",
"list"] 52 | } 53 | ] 54 | }` 55 | ) 56 | 57 | func runClusterRoleTests(t *testing.T, tests []ClusterRoleTestSuites) { 58 | for _, test := range tests { 59 | gvk := metav1.GroupVersionKind{ 60 | Group: "rbac.authorization.k8s.io", 61 | Version: "v1", 62 | Kind: "ClusterRole", 63 | } 64 | gvr := metav1.GroupVersionResource{ 65 | Group: "rbac.authorization.k8s.io", 66 | Version: "v1", 67 | Resource: "clusterroles", 68 | } 69 | 70 | var clusterRoleJSON string 71 | if test.targetClusterRole == "cluster-admin" { 72 | clusterRoleJSON = testClusterRoleJSON 73 | } else { 74 | clusterRoleJSON = testOtherClusterRoleJSON 75 | } 76 | 77 | rawOldObject := []byte(clusterRoleJSON) 78 | req := admissionctl.Request{ 79 | AdmissionRequest: admissionv1.AdmissionRequest{ 80 | UID: "test-uid", 81 | Kind: gvk, 82 | Resource: gvr, 83 | Operation: admissionv1.Delete, 84 | UserInfo: authenticationv1.UserInfo{ 85 | Username: test.username, 86 | Groups: test.userGroups, 87 | }, 88 | OldObject: runtime.RawExtension{ 89 | Raw: rawOldObject, 90 | }, 91 | }, 92 | } 93 | 94 | hook := NewWebhook() 95 | response := hook.Authorized(req) 96 | 97 | if response.Allowed != test.shouldBeAllowed { 98 | t.Fatalf("Mismatch: %s (groups=%s) %s %s the clusterrole. 
Test's expectation is that the user %s", test.username, test.userGroups, testutils.CanCanNot(response.Allowed), test.operation, testutils.CanCanNot(test.shouldBeAllowed)) 99 | } 100 | } 101 | } 102 | 103 | func TestClusterRoleDeletionNegative(t *testing.T) { 104 | tests := []ClusterRoleTestSuites{ 105 | { 106 | testID: "regular-user-deny", 107 | username: "test-user", 108 | userGroups: []string{"system:authenticated"}, 109 | operation: admissionv1.Delete, 110 | shouldBeAllowed: false, 111 | targetClusterRole: "cluster-admin", 112 | }, 113 | { 114 | testID: "cluster-admin-user-deny", 115 | username: "cluster-admin", 116 | userGroups: []string{"system:authenticated"}, 117 | operation: admissionv1.Delete, 118 | shouldBeAllowed: false, 119 | targetClusterRole: "cluster-admin", 120 | }, 121 | { 122 | testID: "customer-admin-deny", 123 | username: "customer-user", 124 | userGroups: []string{"system:authenticated", "customer-admin"}, 125 | operation: admissionv1.Delete, 126 | shouldBeAllowed: false, 127 | targetClusterRole: "cluster-admin", 128 | }, 129 | } 130 | 131 | runClusterRoleTests(t, tests) 132 | } 133 | 134 | func TestClusterRoleDeletionPositive(t *testing.T) { 135 | tests := []ClusterRoleTestSuites{ 136 | { 137 | testID: "backplane-admin-allow", 138 | username: "backplane-cluster-admin", 139 | userGroups: []string{"system:authenticated"}, 140 | operation: admissionv1.Delete, 141 | shouldBeAllowed: true, 142 | targetClusterRole: "cluster-admin", 143 | }, 144 | { 145 | testID: "backplane-srep-allow", 146 | username: "test-user", 147 | userGroups: []string{"system:authenticated", "system:serviceaccounts:openshift-backplane-srep"}, 148 | operation: admissionv1.Delete, 149 | shouldBeAllowed: true, 150 | targetClusterRole: "cluster-admin", 151 | }, 152 | { 153 | testID: "other-role-allow", 154 | username: "regular-user", 155 | userGroups: []string{"system:authenticated"}, 156 | operation: admissionv1.Delete, 157 | shouldBeAllowed: true, 158 | targetClusterRole: 
"some-custom-role", 159 | }, 160 | { 161 | testID: "system-user-allow", 162 | username: "system:kube-controller-manager", 163 | userGroups: []string{"system:authenticated"}, 164 | operation: admissionv1.Delete, 165 | shouldBeAllowed: true, 166 | targetClusterRole: "cluster-admin", 167 | }, 168 | } 169 | 170 | runClusterRoleTests(t, tests) 171 | } 172 | -------------------------------------------------------------------------------- /pkg/webhooks/manifestworks/manifestworks_test.go: -------------------------------------------------------------------------------- 1 | package manifestworks 2 | 3 | import ( 4 | "testing" 5 | 6 | admissionv1 "k8s.io/api/admission/v1" 7 | authenticationv1 "k8s.io/api/authentication/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 10 | ) 11 | 12 | func TestManifestWorksAuthorized(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | username string 16 | operation admissionv1.Operation 17 | shouldBeAllowed bool 18 | }{ 19 | { 20 | name: "OCM SA can delete manifestworks", 21 | username: "system:serviceaccount:ocm:ocm", 22 | operation: admissionv1.Delete, 23 | shouldBeAllowed: true, 24 | }, 25 | { 26 | name: "ocm-foundation-s SA can delete manifestworks", 27 | username: "system:serviceaccount:multicluster-engine:ocm-foundation-sa", 28 | operation: admissionv1.Delete, 29 | shouldBeAllowed: true, 30 | }, 31 | { 32 | name: "Random user cannot delete manifestworks", 33 | username: "unknown-user", 34 | operation: admissionv1.Delete, 35 | shouldBeAllowed: false, 36 | }, 37 | { 38 | name: "Non-DELETE operation should be allowed", 39 | username: "unknown-user", 40 | operation: admissionv1.Create, 41 | shouldBeAllowed: true, 42 | }, 43 | } 44 | 45 | for _, test := range tests { 46 | t.Run(test.name, func(t *testing.T) { 47 | webhook := NewWebhook() 48 | request := admissionctl.Request{ 49 | AdmissionRequest: admissionv1.AdmissionRequest{ 50 | UserInfo: 
authenticationv1.UserInfo{ 51 | Username: test.username, 52 | }, 53 | Operation: test.operation, 54 | Kind: metav1.GroupVersionKind{ 55 | Group: "work.open-cluster-management.io", 56 | Kind: "ManifestWork", 57 | }, 58 | }, 59 | } 60 | 61 | response := webhook.Authorized(request) 62 | if response.Allowed != test.shouldBeAllowed { 63 | t.Errorf("Unexpected response for %s. Got %v, expected %v", test.name, response.Allowed, test.shouldBeAllowed) 64 | } 65 | }) 66 | } 67 | } 68 | 69 | func TestName(t *testing.T) { 70 | webhook := NewWebhook() 71 | if webhook.Name() != WebhookName { 72 | t.Errorf("Expected webhook name to be %s, got %s", WebhookName, webhook.Name()) 73 | } 74 | } 75 | 76 | func TestGetURI(t *testing.T) { 77 | webhook := NewWebhook() 78 | uri := webhook.GetURI() 79 | if uri[0] != '/' { 80 | t.Errorf("Expected URI to start with '/', got %s", uri) 81 | } 82 | if uri != "/manifestworks-validation" { 83 | t.Errorf("Expected URI to be /manifestworks-validation, got %s", uri) 84 | } 85 | } 86 | 87 | func TestRules(t *testing.T) { 88 | webhook := NewWebhook() 89 | rules := webhook.Rules() 90 | if len(rules) == 0 { 91 | t.Fatal("Expected at least one rule") 92 | } 93 | } 94 | 95 | func TestDoc(t *testing.T) { 96 | webhook := NewWebhook() 97 | doc := webhook.Doc() 98 | if doc == "" { 99 | t.Error("Expected non-empty documentation string") 100 | } 101 | } 102 | 103 | func TestTimeoutSeconds(t *testing.T) { 104 | webhook := NewWebhook() 105 | timeout := webhook.TimeoutSeconds() 106 | if timeout != 2 { 107 | t.Errorf("Expected timeout to be 2, got %d", timeout) 108 | } 109 | } 110 | 111 | func TestValidate(t *testing.T) { 112 | tests := []struct { 113 | name string 114 | request admissionctl.Request 115 | expected bool 116 | }{ 117 | { 118 | name: "Valid request", 119 | request: admissionctl.Request{ 120 | AdmissionRequest: admissionv1.AdmissionRequest{ 121 | UserInfo: authenticationv1.UserInfo{ 122 | Username: "test-user", 123 | }, 124 | Kind: 
metav1.GroupVersionKind{ 125 | Group: "work.open-cluster-management.io", 126 | Kind: "ManifestWork", 127 | }, 128 | }, 129 | }, 130 | expected: true, 131 | }, 132 | { 133 | name: "Invalid request without username", 134 | request: admissionctl.Request{ 135 | AdmissionRequest: admissionv1.AdmissionRequest{ 136 | UserInfo: authenticationv1.UserInfo{ 137 | Username: "", 138 | }, 139 | Kind: metav1.GroupVersionKind{ 140 | Group: "work.open-cluster-management.io", 141 | Kind: "ManifestWork", 142 | }, 143 | }, 144 | }, 145 | expected: false, 146 | }, 147 | { 148 | name: "Invalid request with wrong kind", 149 | request: admissionctl.Request{ 150 | AdmissionRequest: admissionv1.AdmissionRequest{ 151 | UserInfo: authenticationv1.UserInfo{ 152 | Username: "test-user", 153 | }, 154 | Kind: metav1.GroupVersionKind{ 155 | Group: "work.open-cluster-management.io", 156 | Kind: "Pod", 157 | }, 158 | }, 159 | }, 160 | expected: false, 161 | }, 162 | { 163 | name: "Invalid request with wrong group", 164 | request: admissionctl.Request{ 165 | AdmissionRequest: admissionv1.AdmissionRequest{ 166 | UserInfo: authenticationv1.UserInfo{ 167 | Username: "test-user", 168 | }, 169 | Kind: metav1.GroupVersionKind{ 170 | Group: "apps", 171 | Kind: "ManifestWork", 172 | }, 173 | }, 174 | }, 175 | expected: false, 176 | }, 177 | } 178 | 179 | for _, test := range tests { 180 | t.Run(test.name, func(t *testing.T) { 181 | webhook := NewWebhook() 182 | result := webhook.Validate(test.request) 183 | if result != test.expected { 184 | t.Errorf("Expected %v, got %v", test.expected, result) 185 | } 186 | }) 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /pkg/webhooks/hiveownership/hiveownership.go: -------------------------------------------------------------------------------- 1 | package hiveownership 2 | 3 | import ( 4 | "os" 5 | "slices" 6 | "sync" 7 | 8 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 9 | 
admissionregv1 "k8s.io/api/admissionregistration/v1" 10 | admissionv1 "k8s.io/api/apps/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | logf "sigs.k8s.io/controller-runtime/pkg/log" 14 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | ) 16 | 17 | // const 18 | const ( 19 | WebhookName string = "hiveownership-validation" 20 | docString string = `Managed OpenShift customers may not edit certain managed resources. A managed resource has a "hive.openshift.io/managed": "true" label.` 21 | ) 22 | 23 | // HiveOwnershipWebhook denies requests 24 | // if it made by a customer to manage hive-labeled resources 25 | type HiveOwnershipWebhook struct { 26 | mu sync.Mutex 27 | s runtime.Scheme 28 | } 29 | 30 | var ( 31 | privilegedUsers = []string{"kube:admin", "system:admin", "system:serviceaccount:kube-system:generic-garbage-collector", "backplane-cluster-admin"} 32 | adminGroups = []string{"system:serviceaccounts:openshift-backplane-srep"} 33 | 34 | log = logf.Log.WithName(WebhookName) 35 | 36 | scope = admissionregv1.ClusterScope 37 | rules = []admissionregv1.RuleWithOperations{ 38 | { 39 | Operations: []admissionregv1.OperationType{"UPDATE", "DELETE"}, 40 | Rule: admissionregv1.Rule{ 41 | APIGroups: []string{"quota.openshift.io"}, 42 | APIVersions: []string{"*"}, 43 | Resources: []string{"clusterresourcequotas"}, 44 | Scope: &scope, 45 | }, 46 | }, 47 | } 48 | ) 49 | 50 | // TimeoutSeconds implements Webhook interface 51 | func (s *HiveOwnershipWebhook) TimeoutSeconds() int32 { return 2 } 52 | 53 | // MatchPolicy implements Webhook interface 54 | func (s *HiveOwnershipWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 55 | return admissionregv1.Equivalent 56 | } 57 | 58 | // Name implements Webhook interface 59 | func (s *HiveOwnershipWebhook) Name() string { return WebhookName } 60 | 61 | // FailurePolicy implements Webhook interface 62 | func (s *HiveOwnershipWebhook) FailurePolicy() 
admissionregv1.FailurePolicyType { 63 | return admissionregv1.Ignore 64 | } 65 | 66 | // Rules implements Webhook interface 67 | func (s *HiveOwnershipWebhook) Rules() []admissionregv1.RuleWithOperations { return rules } 68 | 69 | // GetURI implements Webhook interface 70 | func (s *HiveOwnershipWebhook) GetURI() string { return "/" + WebhookName } 71 | 72 | // SideEffects implements Webhook interface 73 | func (s *HiveOwnershipWebhook) SideEffects() admissionregv1.SideEffectClass { 74 | return admissionregv1.SideEffectClassNone 75 | } 76 | 77 | // Validate is the incoming request even valid? 78 | func (s *HiveOwnershipWebhook) Validate(req admissionctl.Request) bool { 79 | valid := true 80 | valid = valid && (req.UserInfo.Username != "") 81 | 82 | return valid 83 | } 84 | 85 | // Doc documents the hook 86 | func (s *HiveOwnershipWebhook) Doc() string { 87 | return docString 88 | } 89 | 90 | // ObjectSelector intercepts based on having the label 91 | // .metadata.labels["hive.openshift.io/managed"] == "true" 92 | func (s *HiveOwnershipWebhook) ObjectSelector() *metav1.LabelSelector { 93 | return &metav1.LabelSelector{ 94 | MatchLabels: map[string]string{ 95 | "hive.openshift.io/managed": "true", 96 | }, 97 | } 98 | } 99 | 100 | func (s *HiveOwnershipWebhook) authorized(request admissionctl.Request) admissionctl.Response { 101 | var ret admissionctl.Response 102 | 103 | // Admin users 104 | if slices.Contains(privilegedUsers, request.AdmissionRequest.UserInfo.Username) { 105 | ret = admissionctl.Allowed("Admin users may edit managed resources") 106 | ret.UID = request.AdmissionRequest.UID 107 | return ret 108 | } 109 | // Users in admin groups 110 | for _, group := range request.AdmissionRequest.UserInfo.Groups { 111 | if slices.Contains(adminGroups, group) { 112 | ret = admissionctl.Allowed("Members of admin group may edit managed resources") 113 | ret.UID = request.AdmissionRequest.UID 114 | return ret 115 | } 116 | } 117 | 118 | ret = 
admissionctl.Denied("Prevented from accessing Red Hat managed resources. This is in an effort to prevent harmful actions that may cause unintended consequences or affect the stability of the cluster. If you have any questions about this, please reach out to Red Hat support at https://access.redhat.com/support") 119 | ret.UID = request.AdmissionRequest.UID 120 | return ret 121 | } 122 | 123 | // Authorized implements Webhook interface 124 | func (s *HiveOwnershipWebhook) Authorized(request admissionctl.Request) admissionctl.Response { 125 | return s.authorized(request) 126 | } 127 | 128 | // CustomSelector implements Webhook interface, returning the custom label selector for the syncset, if any 129 | func (s *HiveOwnershipWebhook) SyncSetLabelSelector() metav1.LabelSelector { 130 | return utils.DefaultLabelSelector() 131 | } 132 | 133 | func (s *HiveOwnershipWebhook) ClassicEnabled() bool { return true } 134 | 135 | func (s *HiveOwnershipWebhook) HypershiftEnabled() bool { return false } 136 | 137 | // NewWebhook creates a new webhook 138 | func NewWebhook() *HiveOwnershipWebhook { 139 | scheme := runtime.NewScheme() 140 | err := admissionv1.AddToScheme(scheme) 141 | if err != nil { 142 | log.Error(err, "Fail adding admissionsv1 scheme to HiveOwnershipWebhook") 143 | os.Exit(1) 144 | } 145 | 146 | return &HiveOwnershipWebhook{ 147 | s: *scheme, 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /pkg/webhooks/hostedcluster/hostedcluster.go: -------------------------------------------------------------------------------- 1 | package hostedcluster 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "sync" 7 | 8 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 9 | admissionv1 "k8s.io/api/admission/v1" 10 | admissionregv1 "k8s.io/api/admissionregistration/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | logf "sigs.k8s.io/controller-runtime/pkg/log" 14 
| admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | ) 16 | 17 | const ( 18 | WebhookName string = "hostedcluster-validation" 19 | docString string = "Validates HostedCluster deletion operations are only performed by authorized service accounts" 20 | ) 21 | 22 | var ( 23 | // Only this service account is allowed to delete HostedClusters 24 | allowedServiceAccount = "system:serviceaccount:open-cluster-management-agent:klusterlet-work-sa" 25 | 26 | scope = admissionregv1.NamespacedScope 27 | rules = []admissionregv1.RuleWithOperations{ 28 | { 29 | Operations: []admissionregv1.OperationType{"DELETE"}, 30 | Rule: admissionregv1.Rule{ 31 | APIGroups: []string{"hypershift.openshift.io"}, 32 | APIVersions: []string{"*"}, 33 | Resources: []string{"hostedclusters"}, 34 | Scope: &scope, 35 | }, 36 | }, 37 | } 38 | log = logf.Log.WithName(WebhookName) 39 | ) 40 | 41 | // HostedClusterWebhook validates HostedCluster deletion operations 42 | type HostedClusterWebhook struct { 43 | mu sync.Mutex 44 | s runtime.Scheme 45 | } 46 | 47 | // ObjectSelector implements Webhook interface 48 | func (s *HostedClusterWebhook) ObjectSelector() *metav1.LabelSelector { return nil } 49 | 50 | func (s *HostedClusterWebhook) Doc() string { 51 | return fmt.Sprintf(docString) 52 | } 53 | 54 | // TimeoutSeconds implements Webhook interface 55 | func (s *HostedClusterWebhook) TimeoutSeconds() int32 { return 2 } 56 | 57 | // MatchPolicy implements Webhook interface 58 | func (s *HostedClusterWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 59 | return admissionregv1.Equivalent 60 | } 61 | 62 | // Name implements Webhook interface 63 | func (s *HostedClusterWebhook) Name() string { return WebhookName } 64 | 65 | // FailurePolicy implements Webhook interface 66 | func (s *HostedClusterWebhook) FailurePolicy() admissionregv1.FailurePolicyType { 67 | return admissionregv1.Ignore 68 | } 69 | 70 | // Rules implements Webhook interface 71 | func (s *HostedClusterWebhook) Rules() 
[]admissionregv1.RuleWithOperations { return rules } 72 | 73 | // GetURI implements Webhook interface 74 | func (s *HostedClusterWebhook) GetURI() string { return "/hostedcluster-validation" } 75 | 76 | // SideEffects implements Webhook interface 77 | func (s *HostedClusterWebhook) SideEffects() admissionregv1.SideEffectClass { 78 | return admissionregv1.SideEffectClassNone 79 | } 80 | 81 | // Validate - Make sure we're working with a well-formed Admission Request object 82 | func (s *HostedClusterWebhook) Validate(req admissionctl.Request) bool { 83 | valid := true 84 | valid = valid && (req.UserInfo.Username != "") 85 | valid = valid && (req.Kind.Kind == "HostedCluster") 86 | valid = valid && (req.Kind.Group == "hypershift.openshift.io") 87 | 88 | return valid 89 | } 90 | 91 | // Authorized implements Webhook interface 92 | func (s *HostedClusterWebhook) Authorized(request admissionctl.Request) admissionctl.Response { 93 | return s.authorized(request) 94 | } 95 | 96 | // Is the request authorized 97 | func (s *HostedClusterWebhook) authorized(request admissionctl.Request) admissionctl.Response { 98 | var ret admissionctl.Response 99 | 100 | // Only allow DELETE operations from the specified service account 101 | if request.UserInfo.Username == allowedServiceAccount { 102 | ret = admissionctl.Allowed("Service account is authorized to delete HostedCluster resources") 103 | ret.UID = request.AdmissionRequest.UID 104 | return ret 105 | } 106 | 107 | // If not a delete operation, allow it 108 | if request.Operation != admissionv1.Delete { 109 | ret = admissionctl.Allowed("Only DELETE operations are restricted") 110 | ret.UID = request.AdmissionRequest.UID 111 | return ret 112 | } 113 | 114 | // Deny all other requests 115 | log.Info("Unauthorized attempt to delete HostedCluster", 116 | "user", request.UserInfo.Username, 117 | "groups", request.UserInfo.Groups) 118 | 119 | ret = admissionctl.Denied(fmt.Sprintf("Only %s is authorized to delete HostedCluster resources", 
allowedServiceAccount)) 120 | ret.UID = request.AdmissionRequest.UID 121 | return ret 122 | } 123 | 124 | // SyncSetLabelSelector returns the label selector to use in the SyncSet. 125 | func (s *HostedClusterWebhook) SyncSetLabelSelector() metav1.LabelSelector { 126 | customLabelSelector := utils.DefaultLabelSelector() 127 | customLabelSelector.MatchExpressions = append(customLabelSelector.MatchExpressions, 128 | metav1.LabelSelectorRequirement{ 129 | Key: "ext-hypershift.openshift.io/cluster-type", 130 | Operator: metav1.LabelSelectorOpIn, 131 | Values: []string{"management-cluster"}, 132 | }) 133 | return customLabelSelector 134 | } 135 | 136 | func (s *HostedClusterWebhook) ClassicEnabled() bool { return true } 137 | 138 | func (s *HostedClusterWebhook) HypershiftEnabled() bool { return false } 139 | 140 | // NewWebhook creates a new webhook 141 | func NewWebhook() *HostedClusterWebhook { 142 | scheme := runtime.NewScheme() 143 | err := admissionv1.AddToScheme(scheme) 144 | if err != nil { 145 | log.Error(err, "Fail adding admissionsv1 scheme to HostedClusterWebhook") 146 | os.Exit(1) 147 | } 148 | return &HostedClusterWebhook{ 149 | s: *scheme, 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /pkg/webhooks/ingressconfig/ingressconfig.go: -------------------------------------------------------------------------------- 1 | package ingressconfig 2 | 3 | import ( 4 | "os" 5 | "regexp" 6 | "sync" 7 | 8 | admissionv1 "k8s.io/api/admission/v1" 9 | admissionregv1 "k8s.io/api/admissionregistration/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | logf "sigs.k8s.io/controller-runtime/pkg/log" 14 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | 16 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 17 | ) 18 | 19 | const ( 20 | WebhookName string = "ingress-config-validation" 21 | 
privilegedUsers string = `system:admin` 22 | docString string = `Managed OpenShift customers may not modify ingress config resources because it can can degrade cluster operators and can interfere with OpenShift SRE monitoring.` 23 | ) 24 | 25 | var ( 26 | log = logf.Log.WithName(WebhookName) 27 | privilegedServiceAccountsRe = regexp.MustCompile(utils.PrivilegedServiceAccountGroups) 28 | privilegedUsersRe = regexp.MustCompile(privilegedUsers) 29 | 30 | scope = admissionregv1.ClusterScope 31 | rules = []admissionregv1.RuleWithOperations{ 32 | { 33 | Operations: []admissionregv1.OperationType{"CREATE", "UPDATE", "DELETE"}, 34 | Rule: admissionregv1.Rule{ 35 | APIGroups: []string{"config.openshift.io"}, 36 | APIVersions: []string{"*"}, 37 | Resources: []string{"ingresses"}, 38 | Scope: &scope, 39 | }, 40 | }, 41 | } 42 | ) 43 | 44 | type IngressConfigWebhook struct { 45 | mu sync.Mutex 46 | s runtime.Scheme 47 | } 48 | 49 | // Authorized will determine if the request is allowed 50 | func (w *IngressConfigWebhook) Authorized(request admissionctl.Request) (ret admissionctl.Response) { 51 | ret = admissionctl.Denied("Only privileged service accounts may access") 52 | ret.UID = request.AdmissionRequest.UID 53 | 54 | // allow if modified by an allowlist-ed service account 55 | for _, group := range request.UserInfo.Groups { 56 | if privilegedServiceAccountsRe.Match([]byte(group)) { 57 | ret = admissionctl.Allowed("Privileged service accounts may access") 58 | ret.UID = request.AdmissionRequest.UID 59 | } 60 | } 61 | 62 | // allow if modified by an allowliste-ed user 63 | if privilegedUsersRe.Match([]byte(request.UserInfo.Username)) { 64 | ret = admissionctl.Allowed("Privileged service accounts may access") 65 | ret.UID = request.AdmissionRequest.UID 66 | } 67 | 68 | return 69 | } 70 | 71 | // GetURI returns the URI for the webhook 72 | func (w *IngressConfigWebhook) GetURI() string { return "/ingressconfig-validation" } 73 | 74 | // Validate will validate the incoming 
request 75 | func (w *IngressConfigWebhook) Validate(req admissionctl.Request) bool { 76 | valid := true 77 | valid = valid && (req.UserInfo.Username != "") 78 | valid = valid && (req.Kind.Kind == "Ingress") 79 | 80 | return valid 81 | } 82 | 83 | // Name is the name of the webhook 84 | func (w *IngressConfigWebhook) Name() string { return WebhookName } 85 | 86 | // FailurePolicy is how the hook config should react if k8s can't access it 87 | func (w *IngressConfigWebhook) FailurePolicy() admissionregv1.FailurePolicyType { 88 | return admissionregv1.Ignore 89 | } 90 | 91 | // MatchPolicy mirrors validatingwebhookconfiguration.webhooks[].matchPolicy 92 | // If it is important to the webhook, be sure to check subResource vs 93 | // requestSubResource. 94 | func (w *IngressConfigWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 95 | return admissionregv1.Equivalent 96 | } 97 | 98 | // Rules is a slice of rules on which this hook should trigger 99 | func (w *IngressConfigWebhook) Rules() []admissionregv1.RuleWithOperations { return rules } 100 | 101 | // ObjectSelector uses a *metav1.LabelSelector to augment the webhook's 102 | // Rules() to match only on incoming requests which match the specific 103 | // LabelSelector. 104 | func (w *IngressConfigWebhook) ObjectSelector() *metav1.LabelSelector { return nil } 105 | 106 | // SideEffects are what side effects, if any, this hook has. Refer to 107 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#side-effects 108 | func (w *IngressConfigWebhook) SideEffects() admissionregv1.SideEffectClass { 109 | return admissionregv1.SideEffectClassNone 110 | } 111 | 112 | // TimeoutSeconds returns an int32 representing how long to wait for this hook to complete 113 | func (w *IngressConfigWebhook) TimeoutSeconds() int32 { return 2 } 114 | 115 | // Doc returns a string for end-customer documentation purposes. 
116 | func (w *IngressConfigWebhook) Doc() string { return docString } 117 | 118 | // SyncSetLabelSelector returns the label selector to use in the SyncSet. 119 | // Return utils.DefaultLabelSelector() to stick with the default 120 | func (w *IngressConfigWebhook) SyncSetLabelSelector() metav1.LabelSelector { 121 | return utils.DefaultLabelSelector() 122 | } 123 | 124 | func (w *IngressConfigWebhook) ClassicEnabled() bool { return true } 125 | 126 | // HypershiftEnabled will return boolean value for hypershift enabled configurations 127 | func (w *IngressConfigWebhook) HypershiftEnabled() bool { return true } 128 | 129 | // NewWebhook creates a new webhook 130 | func NewWebhook() *IngressConfigWebhook { 131 | scheme := runtime.NewScheme() 132 | err := admissionv1.AddToScheme(scheme) 133 | if err != nil { 134 | log.Error(err, "Fail adding admissionsv1 scheme to IngressConfigWebhook") 135 | os.Exit(1) 136 | } 137 | 138 | err = corev1.AddToScheme(scheme) 139 | if err != nil { 140 | log.Error(err, "Fail adding corev1 scheme to IngressConfigWebhook") 141 | os.Exit(1) 142 | } 143 | 144 | return &IngressConfigWebhook{ 145 | s: *scheme, 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /pkg/webhooks/hiveownership/hiveownership_test.go: -------------------------------------------------------------------------------- 1 | package hiveownership 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "reflect" 7 | "testing" 8 | 9 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 10 | 11 | admissionv1 "k8s.io/api/admission/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | ) 15 | 16 | type hiveOwnershipTestSuites struct { 17 | testName string 18 | testID string 19 | username string 20 | userGroups []string 21 | oldObject *runtime.RawExtension 22 | operation admissionv1.Operation 23 | labels map[string]string 24 | shouldBeAllowed bool 25 | } 26 | 27 | const 
testObjectRaw string = `{ 28 | "metadata": { 29 | "name": "%s", 30 | "uid": "%s", 31 | "creationTimestamp": "2020-05-10T07:51:00Z", 32 | "labels": %s 33 | }, 34 | "users": null 35 | }` 36 | 37 | // labelsMapToString is a helper to turn a map into a JSON fragment to be 38 | // inserted into the testNamespaceRaw const. See createRawJSONString. 39 | func labelsMapToString(labels map[string]string) string { 40 | ret, _ := json.Marshal(labels) 41 | return string(ret) 42 | } 43 | 44 | func createRawJSONString(name, uid string, labels map[string]string) string { 45 | return fmt.Sprintf(testObjectRaw, name, uid, labelsMapToString(labels)) 46 | } 47 | func createOldObject(name, uid string, labels map[string]string) *runtime.RawExtension { 48 | return &runtime.RawExtension{ 49 | Raw: []byte(createRawJSONString(name, uid, labels)), 50 | } 51 | } 52 | 53 | func runTests(t *testing.T, tests []hiveOwnershipTestSuites) { 54 | gvk := metav1.GroupVersionKind{ 55 | Group: "quota.openshift.io", 56 | Version: "v1", 57 | Kind: "ClusterResourceQuota", 58 | } 59 | gvr := metav1.GroupVersionResource{ 60 | Group: "quota.openshift.io", 61 | Version: "v1", 62 | Resource: "clusterresourcequotas", 63 | } 64 | 65 | for _, test := range tests { 66 | obj := createOldObject(test.testName, test.testID, test.labels) 67 | hook := NewWebhook() 68 | httprequest, err := testutils.CreateHTTPRequest(hook.GetURI(), 69 | test.testID, 70 | gvk, gvr, test.operation, test.username, test.userGroups, "", obj, test.oldObject) 71 | if err != nil { 72 | t.Fatalf("Expected no error, got %s", err.Error()) 73 | } 74 | 75 | response, err := testutils.SendHTTPRequest(httprequest, hook) 76 | if err != nil { 77 | t.Fatalf("Expected no error, got %s", err.Error()) 78 | } 79 | if response.UID == "" { 80 | t.Fatalf("No tracking UID associated with the response: %+v", response) 81 | } 82 | 83 | if response.Allowed != test.shouldBeAllowed { 84 | t.Fatalf("Mismatch: %s (groups=%s) %s %s. 
Test's expectation is that the user %s", 85 | test.username, test.userGroups, 86 | testutils.CanCanNot(response.Allowed), string(test.operation), 87 | testutils.CanCanNot(test.shouldBeAllowed)) 88 | } 89 | } 90 | } 91 | 92 | func TestThing(t *testing.T) { 93 | tests := []hiveOwnershipTestSuites{ 94 | { 95 | testID: "kube-admin-test", 96 | username: "kube:admin", 97 | userGroups: []string{"kube:system", "system:authenticated", "system:authenticated:oauth"}, 98 | operation: admissionv1.Create, 99 | shouldBeAllowed: true, 100 | }, 101 | { 102 | testID: "kube-admin-test", 103 | username: "backplane-cluster-admin", 104 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 105 | operation: admissionv1.Create, 106 | shouldBeAllowed: true, 107 | }, 108 | { 109 | testID: "sre-test", 110 | username: "sre-foo@redhat.com", 111 | userGroups: []string{adminGroups[0], "system:authenticated", "system:authenticated:oauth"}, 112 | operation: admissionv1.Update, 113 | shouldBeAllowed: true, 114 | }, 115 | { 116 | // dedicated-admin users. This should be blocked as making changes as CU on clusterresourcequota which are managed are prohibited. 117 | testID: "dedicated-admin-test", 118 | username: "bob@foo.com", 119 | userGroups: []string{"dedicated-admins", "system:authenticated", "system:authenticated:oauth"}, 120 | operation: admissionv1.Update, 121 | labels: map[string]string{"hive.openshift.io/managed": "true"}, 122 | shouldBeAllowed: false, 123 | }, 124 | { 125 | // no special privileges, only an authenticated user. This should be blocked as making changes on clusterresourcequota which are managed are prohibited. 
126 | testID: "unpriv-update-test", 127 | username: "unpriv-user", 128 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 129 | operation: admissionv1.Update, 130 | labels: map[string]string{"hive.openshift.io/managed": "true"}, 131 | shouldBeAllowed: false, 132 | }, 133 | } 134 | runTests(t, tests) 135 | } 136 | 137 | func TestBadRequests(t *testing.T) { 138 | t.Skip() 139 | } 140 | 141 | func TestName(t *testing.T) { 142 | if NewWebhook().Name() == "" { 143 | t.Fatalf("Empty hook name") 144 | } 145 | } 146 | 147 | func TestRules(t *testing.T) { 148 | if len(NewWebhook().Rules()) == 0 { 149 | t.Log("No rules for this webhook?") 150 | } 151 | } 152 | 153 | func TestGetURI(t *testing.T) { 154 | if NewWebhook().GetURI()[0] != '/' { 155 | t.Fatalf("Hook URI does not begin with a /") 156 | } 157 | } 158 | 159 | func TestObjectSelector(t *testing.T) { 160 | obj := &metav1.LabelSelector{ 161 | MatchLabels: map[string]string{ 162 | "hive.openshift.io/managed": "true", 163 | }, 164 | } 165 | 166 | if !reflect.DeepEqual(NewWebhook().ObjectSelector(), obj) { 167 | t.Fatalf("hive managed resources label name is not correct.") 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /pkg/webhooks/hcpnamespace/hcpnamespace_test.go: -------------------------------------------------------------------------------- 1 | package hcpnamespace 2 | 3 | import ( 4 | "testing" 5 | 6 | admissionv1 "k8s.io/api/admission/v1" 7 | authenticationv1 "k8s.io/api/authentication/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 10 | ) 11 | 12 | func TestAuthorized(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | username string 16 | namespace string 17 | operation admissionv1.Operation 18 | shouldBeAllowed bool 19 | }{ 20 | { 21 | name: "Allowed user can delete protected namespace", 22 | username: "system:admin", 23 | namespace: 
"ocm-staging-test", 24 | operation: admissionv1.Delete, 25 | shouldBeAllowed: true, 26 | }, 27 | { 28 | name: "Klusterlet SA can delete protected namespace", 29 | username: "system:serviceaccount:open-cluster-management-agent:klusterlet-work-sa", 30 | namespace: "klusterlet-test", 31 | operation: admissionv1.Delete, 32 | shouldBeAllowed: true, 33 | }, 34 | { 35 | name: "Hypershift operator can delete protected namespace", 36 | username: "system:serviceaccount:hypershift:operator", 37 | namespace: "hs-mc-test", 38 | operation: admissionv1.Delete, 39 | shouldBeAllowed: true, 40 | }, 41 | { 42 | name: "Random user cannot delete protected namespace", 43 | username: "unknown-user", 44 | namespace: "ocm-staging-test", 45 | operation: admissionv1.Delete, 46 | shouldBeAllowed: false, 47 | }, 48 | { 49 | name: "Random user can delete unprotected namespace", 50 | username: "unknown-user", 51 | namespace: "test-namespace", 52 | operation: admissionv1.Delete, 53 | shouldBeAllowed: true, 54 | }, 55 | { 56 | name: "Non-DELETE operation should be allowed on protected namespace", 57 | username: "unknown-user", 58 | namespace: "ocm-production-test", 59 | operation: admissionv1.Update, 60 | shouldBeAllowed: true, 61 | }, 62 | { 63 | name: "Klusterlet SA can delete protected namespace", 64 | username: "system:serviceaccount:open-cluster-management-agent:klusterlet", 65 | namespace: "ocm-integration-test", 66 | operation: admissionv1.Delete, 67 | shouldBeAllowed: true, 68 | }, 69 | } 70 | 71 | for _, test := range tests { 72 | t.Run(test.name, func(t *testing.T) { 73 | webhook := NewWebhook() 74 | request := admissionctl.Request{ 75 | AdmissionRequest: admissionv1.AdmissionRequest{ 76 | Name: test.namespace, 77 | UserInfo: authenticationv1.UserInfo{ 78 | Username: test.username, 79 | }, 80 | Operation: test.operation, 81 | }, 82 | } 83 | 84 | response := webhook.Authorized(request) 85 | if response.Allowed != test.shouldBeAllowed { 86 | t.Errorf("Unexpected response. 
Got %v, expected %v", response.Allowed, test.shouldBeAllowed) 87 | } 88 | }) 89 | } 90 | } 91 | 92 | func TestName(t *testing.T) { 93 | webhook := NewWebhook() 94 | if webhook.Name() != WebhookName { 95 | t.Errorf("Expected webhook name to be %s, got %s", WebhookName, webhook.Name()) 96 | } 97 | } 98 | 99 | func TestGetURI(t *testing.T) { 100 | webhook := NewWebhook() 101 | uri := webhook.GetURI() 102 | if uri[0] != '/' { 103 | t.Errorf("Expected URI to start with '/', got %s", uri) 104 | } 105 | if uri != "/hcpnamespace-validation" { 106 | t.Errorf("Expected URI to be /hcpnamespace-validation, got %s", uri) 107 | } 108 | } 109 | 110 | func TestRules(t *testing.T) { 111 | webhook := NewWebhook() 112 | rules := webhook.Rules() 113 | if len(rules) == 0 { 114 | t.Fatal("Expected at least one rule") 115 | } 116 | } 117 | 118 | func TestDoc(t *testing.T) { 119 | webhook := NewWebhook() 120 | doc := webhook.Doc() 121 | if doc == "" { 122 | t.Error("Expected non-empty documentation string") 123 | } 124 | } 125 | 126 | func TestTimeoutSeconds(t *testing.T) { 127 | webhook := NewWebhook() 128 | timeout := webhook.TimeoutSeconds() 129 | if timeout != 2 { 130 | t.Errorf("Expected timeout to be 2, got %d", timeout) 131 | } 132 | } 133 | 134 | func TestValidate(t *testing.T) { 135 | tests := []struct { 136 | name string 137 | request admissionctl.Request 138 | expected bool 139 | }{ 140 | { 141 | name: "Valid request", 142 | request: admissionctl.Request{ 143 | AdmissionRequest: admissionv1.AdmissionRequest{ 144 | UserInfo: authenticationv1.UserInfo{ 145 | Username: "test-user", 146 | }, 147 | Kind: metav1.GroupVersionKind{ 148 | Kind: "Namespace", 149 | }, 150 | }, 151 | }, 152 | expected: true, 153 | }, 154 | { 155 | name: "Invalid request without username", 156 | request: admissionctl.Request{ 157 | AdmissionRequest: admissionv1.AdmissionRequest{ 158 | UserInfo: authenticationv1.UserInfo{ 159 | Username: "", 160 | }, 161 | Kind: metav1.GroupVersionKind{ 162 | Kind: 
"Namespace", 163 | }, 164 | }, 165 | }, 166 | expected: false, 167 | }, 168 | { 169 | name: "Invalid request with wrong kind", 170 | request: admissionctl.Request{ 171 | AdmissionRequest: admissionv1.AdmissionRequest{ 172 | UserInfo: authenticationv1.UserInfo{ 173 | Username: "test-user", 174 | }, 175 | Kind: metav1.GroupVersionKind{ 176 | Kind: "Pod", 177 | }, 178 | }, 179 | }, 180 | expected: false, 181 | }, 182 | } 183 | 184 | for _, test := range tests { 185 | t.Run(test.name, func(t *testing.T) { 186 | webhook := NewWebhook() 187 | result := webhook.Validate(test.request) 188 | if result != test.expected { 189 | t.Errorf("Expected %v, got %v", test.expected, result) 190 | } 191 | }) 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /pkg/webhooks/manifestworks/manifestworks.go: -------------------------------------------------------------------------------- 1 | package manifestworks 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "slices" 7 | "sync" 8 | 9 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 10 | admissionv1 "k8s.io/api/admission/v1" 11 | admissionregv1 "k8s.io/api/admissionregistration/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | logf "sigs.k8s.io/controller-runtime/pkg/log" 15 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 16 | ) 17 | 18 | const ( 19 | WebhookName string = "manifestworks-validation" 20 | docString string = "Validates ManifestWorks deletion operations are only performed by authorized service accounts" 21 | ) 22 | 23 | var ( 24 | // List of service accounts allowed to delete ManifestWorks 25 | allowedServiceAccounts = []string{ 26 | "system:serviceaccount:ocm:ocm", 27 | "system:serviceaccount:kube-system:generic-garbage-collector", 28 | "system:serviceaccount:multicluster-engine:ocm-foundation-sa", 29 | "system:serviceaccount:multicluster-hub:grc-policy-addon-sa", 30 | 
"system:serviceaccount:multicluster-engine:managedcluster-import-controller-v2", 31 | "system:serviceaccount:kube-system:namespace-controller", 32 | } 33 | 34 | scope = admissionregv1.NamespacedScope 35 | rules = []admissionregv1.RuleWithOperations{ 36 | { 37 | Operations: []admissionregv1.OperationType{"DELETE"}, 38 | Rule: admissionregv1.Rule{ 39 | APIGroups: []string{"work.open-cluster-management.io"}, 40 | APIVersions: []string{"*"}, 41 | Resources: []string{"manifestworks"}, 42 | Scope: &scope, 43 | }, 44 | }, 45 | } 46 | log = logf.Log.WithName(WebhookName) 47 | ) 48 | 49 | // ManifestWorksWebhook validates ManifestWorks deletion operations 50 | type ManifestWorksWebhook struct { 51 | mu sync.Mutex 52 | s runtime.Scheme 53 | } 54 | 55 | // ObjectSelector implements Webhook interface 56 | func (s *ManifestWorksWebhook) ObjectSelector() *metav1.LabelSelector { return nil } 57 | 58 | func (s *ManifestWorksWebhook) Doc() string { 59 | return fmt.Sprintf(docString) 60 | } 61 | 62 | // TimeoutSeconds implements Webhook interface 63 | func (s *ManifestWorksWebhook) TimeoutSeconds() int32 { return 2 } 64 | 65 | // MatchPolicy implements Webhook interface 66 | func (s *ManifestWorksWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 67 | return admissionregv1.Equivalent 68 | } 69 | 70 | // Name implements Webhook interface 71 | func (s *ManifestWorksWebhook) Name() string { return WebhookName } 72 | 73 | // FailurePolicy implements Webhook interface 74 | func (s *ManifestWorksWebhook) FailurePolicy() admissionregv1.FailurePolicyType { 75 | return admissionregv1.Ignore 76 | } 77 | 78 | // Rules implements Webhook interface 79 | func (s *ManifestWorksWebhook) Rules() []admissionregv1.RuleWithOperations { return rules } 80 | 81 | // GetURI implements Webhook interface 82 | func (s *ManifestWorksWebhook) GetURI() string { return "/manifestworks-validation" } 83 | 84 | // SideEffects implements Webhook interface 85 | func (s *ManifestWorksWebhook) SideEffects() 
admissionregv1.SideEffectClass { 86 | return admissionregv1.SideEffectClassNone 87 | } 88 | 89 | // Validate - Make sure we're working with a well-formed Admission Request object 90 | func (s *ManifestWorksWebhook) Validate(req admissionctl.Request) bool { 91 | valid := true 92 | valid = valid && (req.UserInfo.Username != "") 93 | valid = valid && (req.Kind.Kind == "ManifestWork") 94 | valid = valid && (req.Kind.Group == "work.open-cluster-management.io") 95 | 96 | return valid 97 | } 98 | 99 | // Authorized implements Webhook interface 100 | func (s *ManifestWorksWebhook) Authorized(request admissionctl.Request) admissionctl.Response { 101 | return s.authorized(request) 102 | } 103 | 104 | // Is the request authorized? 105 | func (s *ManifestWorksWebhook) authorized(request admissionctl.Request) admissionctl.Response { 106 | var ret admissionctl.Response 107 | 108 | // Check if the requesting user is in the list of allowed service accounts 109 | if slices.Contains(allowedServiceAccounts, request.UserInfo.Username) { 110 | ret = admissionctl.Allowed("Service account is authorized to delete ManifestWork resources") 111 | ret.UID = request.AdmissionRequest.UID 112 | return ret 113 | } 114 | 115 | // If not a delete operation, allow it 116 | if request.Operation != admissionv1.Delete { 117 | ret = admissionctl.Allowed("Only DELETE operations are restricted") 118 | ret.UID = request.AdmissionRequest.UID 119 | return ret 120 | } 121 | 122 | // Deny all other requests 123 | log.Info("Unauthorized attempt to delete ManifestWork", 124 | "user", request.UserInfo.Username, 125 | "groups", request.UserInfo.Groups) 126 | 127 | ret = admissionctl.Denied(fmt.Sprintf("Only authorized service accounts can delete ManifestWork resources. Allowed service accounts: %v", allowedServiceAccounts)) 128 | ret.UID = request.AdmissionRequest.UID 129 | return ret 130 | } 131 | 132 | // SyncSetLabelSelector returns the label selector to use in the SyncSet. 
133 | func (s *ManifestWorksWebhook) SyncSetLabelSelector() metav1.LabelSelector { 134 | customLabelSelector := utils.DefaultLabelSelector() 135 | customLabelSelector.MatchExpressions = append(customLabelSelector.MatchExpressions, 136 | metav1.LabelSelectorRequirement{ 137 | Key: "ext-hypershift.openshift.io/cluster-type", 138 | Operator: metav1.LabelSelectorOpIn, 139 | Values: []string{"service-cluster"}, 140 | }) 141 | return customLabelSelector 142 | } 143 | 144 | func (s *ManifestWorksWebhook) ClassicEnabled() bool { return true } 145 | 146 | func (s *ManifestWorksWebhook) HypershiftEnabled() bool { return false } 147 | 148 | // NewWebhook creates a new webhook 149 | func NewWebhook() *ManifestWorksWebhook { 150 | scheme := runtime.NewScheme() 151 | err := admissionv1.AddToScheme(scheme) 152 | if err != nil { 153 | log.Error(err, "Fail adding admissionsv1 scheme to ManifestWorksWebhook") 154 | os.Exit(1) 155 | } 156 | return &ManifestWorksWebhook{ 157 | s: *scheme, 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /pkg/webhooks/scc/scc_test.go: -------------------------------------------------------------------------------- 1 | package scc 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | admissionv1 "k8s.io/api/admission/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | 10 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 11 | 12 | "k8s.io/apimachinery/pkg/runtime" 13 | ) 14 | 15 | type sccTestSuites struct { 16 | testID string 17 | targetSCC string 18 | username string 19 | operation admissionv1.Operation 20 | userGroups []string 21 | shouldBeAllowed bool 22 | } 23 | 24 | const testObjectRaw string = ` 25 | { 26 | "apiVersion": "security.openshift.io/v1", 27 | "kind": "SecurityContextConstraints", 28 | "metadata": { 29 | "name": "%s", 30 | "uid": "1234" 31 | } 32 | }` 33 | 34 | func createRawJSONString(name string) string { 35 | s := fmt.Sprintf(testObjectRaw, name) 36 | 
return s 37 | } 38 | 39 | func runSCCTests(t *testing.T, tests []sccTestSuites) { 40 | gvk := metav1.GroupVersionKind{ 41 | Group: "security.openshift.io", 42 | Version: "v1", 43 | Kind: "SecurityContextConstraints", 44 | } 45 | gvr := metav1.GroupVersionResource{ 46 | Group: "security.openshift.io", 47 | Version: "v1", 48 | Resource: "securitycontextcontraints", 49 | } 50 | 51 | for _, test := range tests { 52 | rawObjString := createRawJSONString(test.targetSCC) 53 | 54 | obj := runtime.RawExtension{ 55 | Raw: []byte(rawObjString), 56 | } 57 | 58 | oldObj := runtime.RawExtension{ 59 | Raw: []byte(rawObjString), 60 | } 61 | 62 | hook := NewWebhook() 63 | httprequest, err := testutils.CreateHTTPRequest(hook.GetURI(), 64 | test.testID, gvk, gvr, test.operation, test.username, test.userGroups, "", &obj, &oldObj) 65 | if err != nil { 66 | t.Fatalf("Expected no error, got %s", err.Error()) 67 | } 68 | 69 | response, err := testutils.SendHTTPRequest(httprequest, hook) 70 | if err != nil { 71 | t.Fatalf("Expected no error, got %s", err.Error()) 72 | } 73 | if response.UID == "" { 74 | t.Fatalf("No tracking UID associated with the response.") 75 | } 76 | 77 | if response.Allowed != test.shouldBeAllowed { 78 | t.Fatalf("Mismatch: %s (groups=%s) %s %s the scc. 
Test's expectation is that the user %s", test.username, test.userGroups, testutils.CanCanNot(response.Allowed), test.operation, testutils.CanCanNot(test.shouldBeAllowed)) 79 | } 80 | } 81 | } 82 | func TestUser(t *testing.T) { 83 | tests := []sccTestSuites{ 84 | { 85 | targetSCC: "hostnetwork", 86 | testID: "user-cant-delete-hostnetwork", 87 | username: "user1", 88 | operation: admissionv1.Delete, 89 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 90 | shouldBeAllowed: false, 91 | }, 92 | { 93 | targetSCC: "hostaccess", 94 | testID: "user-cant-delete-hostaccess", 95 | username: "user2", 96 | operation: admissionv1.Delete, 97 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 98 | shouldBeAllowed: false, 99 | }, 100 | { 101 | targetSCC: "anyuid", 102 | testID: "user-cant-delete-anyuid", 103 | username: "user3", 104 | operation: admissionv1.Delete, 105 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 106 | shouldBeAllowed: false, 107 | }, 108 | { 109 | targetSCC: "anyuid", 110 | testID: "user-cant-modify-hostnetwork", 111 | username: "user4", 112 | operation: admissionv1.Update, 113 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 114 | shouldBeAllowed: false, 115 | }, 116 | { 117 | targetSCC: "hostnetwork-v2", 118 | testID: "user-cant-delete-hostnetwork-v2", 119 | username: "user1", 120 | operation: admissionv1.Delete, 121 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 122 | shouldBeAllowed: false, 123 | }, 124 | { 125 | targetSCC: "testscc", 126 | testID: "user-can-modify-normal", 127 | username: "user1", 128 | operation: admissionv1.Update, 129 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 130 | shouldBeAllowed: true, 131 | }, 132 | { 133 | targetSCC: "hostaccess", 134 | testID: "allowed-user-can-modify-default", 135 | username: 
"system:serviceaccount:openshift-monitoring:cluster-monitoring-operator", 136 | operation: admissionv1.Update, 137 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 138 | shouldBeAllowed: true, 139 | }, 140 | { 141 | targetSCC: "hostaccess", 142 | testID: "allowed-system-admin-can-modify-default", 143 | username: "system:admin", 144 | operation: admissionv1.Update, 145 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 146 | shouldBeAllowed: true, 147 | }, 148 | { 149 | targetSCC: "testscc", 150 | testID: "user-can-delete-normal", 151 | username: "user1", 152 | operation: admissionv1.Delete, 153 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 154 | shouldBeAllowed: true, 155 | }, 156 | { 157 | targetSCC: "hostaccess", 158 | testID: "allowed-user-can-delete-default", 159 | username: "system:serviceaccount:openshift-monitoring:cluster-monitoring-operator", 160 | operation: admissionv1.Delete, 161 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 162 | shouldBeAllowed: true, 163 | }, 164 | { 165 | targetSCC: "privileged", 166 | testID: "osde2e-serviceaccounts-are-not-allowed", 167 | username: "system:serviceaccount:osde2e-abcde:osde2e-runner", 168 | operation: admissionv1.Update, 169 | userGroups: []string{"system:authenticated", "system:serviceaccounts:osde2e-abcde"}, 170 | shouldBeAllowed: false, 171 | }, 172 | { 173 | targetSCC: "anyuid", 174 | testID: "kube-apiserver-operator-allowed", 175 | username: "system:serviceaccount:openshift-kube-apiserver-operator:kube-apiserver-operator", 176 | operation: admissionv1.Update, 177 | userGroups: []string{}, 178 | shouldBeAllowed: true, 179 | }, 180 | } 181 | runSCCTests(t, tests) 182 | } 183 | -------------------------------------------------------------------------------- /pkg/webhooks/hostedcontrolplane/hostedcontrolplane.go: -------------------------------------------------------------------------------- 1 | 
// Package hostedcontrolplane restricts who may delete HostedControlPlane
// resources.
package hostedcontrolplane

import (
	"fmt"
	"os"
	"slices"
	"strings"

	"github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils"
	admissionv1 "k8s.io/api/admission/v1"
	admissionregv1 "k8s.io/api/admissionregistration/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	logf "sigs.k8s.io/controller-runtime/pkg/log"
	admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

const (
	WebhookName string = "hostedcontrolplane-validation"
	docString   string = "Validates HostedControlPlane deletion operations are only performed by authorized service accounts"
)

var (
	// Full usernames of Service accounts allowed to delete HostedControlPlanes
	allowedServiceAccountsUsernames = []string{
		"system:serviceaccount:open-cluster-management-agent:klusterlet-work-sa",
		"system:serviceaccount:kube-system:generic-garbage-collector",
		"system:serviceaccount:hypershift:operator",
	}
	// Names of Service accounts allowed to delete HostedControlPlanes
	// (matched against the last ":"-separated segment of the username).
	allowedServiceAccountsNames = []string{
		"cluster-api",
		"control-plane-pki-operator",
	}

	scope = admissionregv1.NamespacedScope
	// rules registers this webhook for DELETE of hostedcontrolplanes in the
	// hypershift.openshift.io group only.
	rules = []admissionregv1.RuleWithOperations{
		{
			Operations: []admissionregv1.OperationType{"DELETE"},
			Rule: admissionregv1.Rule{
				APIGroups:   []string{"hypershift.openshift.io"},
				APIVersions: []string{"*"},
				Resources:   []string{"hostedcontrolplanes"},
				Scope:       &scope,
			},
		},
	}
	log = logf.Log.WithName(WebhookName)
)

// HostedControlPlaneWebhook validates HostedControlPlane deletion operations
type HostedControlPlaneWebhook struct {
	s runtime.Scheme
}

// ObjectSelector implements Webhook interface
func (s *HostedControlPlaneWebhook) ObjectSelector() *metav1.LabelSelector { return nil }

// Doc implements Webhook interface and returns the human-readable
// description used in generated documentation.
func (s *HostedControlPlaneWebhook) Doc() string {
	// Return the constant directly; fmt.Sprintf with no format arguments
	// is flagged by go vet / staticcheck (S1039).
	return docString
}

// TimeoutSeconds implements Webhook interface
func (s *HostedControlPlaneWebhook) TimeoutSeconds() int32 { return 2 }

// MatchPolicy implements Webhook interface
func (s *HostedControlPlaneWebhook) MatchPolicy() admissionregv1.MatchPolicyType {
	return admissionregv1.Equivalent
}

// Name implements Webhook interface
func (s *HostedControlPlaneWebhook) Name() string { return WebhookName }

// FailurePolicy implements Webhook interface. Ignore means API requests are
// admitted if the webhook endpoint cannot be reached.
func (s *HostedControlPlaneWebhook) FailurePolicy() admissionregv1.FailurePolicyType {
	return admissionregv1.Ignore
}

// Rules implements Webhook interface
func (s *HostedControlPlaneWebhook) Rules() []admissionregv1.RuleWithOperations { return rules }

// GetURI implements Webhook interface
func (s *HostedControlPlaneWebhook) GetURI() string { return "/hostedcontrolplane-validation" }

// SideEffects implements Webhook interface
func (s *HostedControlPlaneWebhook) SideEffects() admissionregv1.SideEffectClass {
	return admissionregv1.SideEffectClassNone
}

// Validate - Make sure we're working with a well-formed Admission Request
// object: a non-empty username and the expected Kind/Group.
func (s *HostedControlPlaneWebhook) Validate(req admissionctl.Request) bool {
	valid := true
	valid = valid && (req.UserInfo.Username != "")
	valid = valid && (req.Kind.Kind == "HostedControlPlane")
	valid = valid && (req.Kind.Group == "hypershift.openshift.io")

	return valid
}

// Authorized implements Webhook interface
func (s *HostedControlPlaneWebhook) Authorized(request admissionctl.Request) admissionctl.Response {
	return s.authorized(request)
}

// authorized allows requests from allow-listed service accounts (by full
// username or by trailing name segment) and any non-DELETE operation;
// everything else is denied. Only DELETE is registered in rules, so the
// non-DELETE branch is defensive.
func (s *HostedControlPlaneWebhook) authorized(request admissionctl.Request) admissionctl.Response {
	var ret admissionctl.Response

	// Allow authorized service accounts
	if slices.Contains(allowedServiceAccountsUsernames, request.UserInfo.Username) {
		ret = admissionctl.Allowed("Service account is authorized to delete HostedControlPlane resources")
		ret.UID = request.AdmissionRequest.UID
		return ret
	}

	// strings.Split never returns an empty slice, so indexing the last
	// element is always safe (the previous len check was redundant).
	// NOTE(review): this matches the last ":"-segment of ANY username, not
	// only "system:serviceaccount:..." ones — confirm this is intended.
	segments := strings.Split(request.UserInfo.Username, ":")
	if slices.Contains(allowedServiceAccountsNames, segments[len(segments)-1]) {
		ret = admissionctl.Allowed("Service account is authorized to delete HostedControlPlane resources")
		ret.UID = request.AdmissionRequest.UID
		return ret
	}

	// If not a delete operation, allow it
	if request.Operation != admissionv1.Delete {
		ret = admissionctl.Allowed("Only DELETE operations are restricted")
		ret.UID = request.AdmissionRequest.UID
		return ret
	}

	// Deny all other delete attempts
	log.Info("Unauthorized attempt to delete HostedControlPlane",
		"user", request.UserInfo.Username,
		"groups", request.UserInfo.Groups)

	ret = admissionctl.Denied(fmt.Sprintf("Only authorized service accounts %s can delete HostedControlPlane resources", strings.Join(append(allowedServiceAccountsUsernames, allowedServiceAccountsNames...), ", ")))
	ret.UID = request.AdmissionRequest.UID
	return ret
}

// SyncSetLabelSelector returns the label selector to use in the SyncSet.
// It narrows the default selector to management clusters only.
func (s *HostedControlPlaneWebhook) SyncSetLabelSelector() metav1.LabelSelector {
	customLabelSelector := utils.DefaultLabelSelector()
	customLabelSelector.MatchExpressions = append(customLabelSelector.MatchExpressions,
		metav1.LabelSelectorRequirement{
			Key:      "ext-hypershift.openshift.io/cluster-type",
			Operator: metav1.LabelSelectorOpIn,
			Values:   []string{"management-cluster"},
		})
	return customLabelSelector
}

func (s *HostedControlPlaneWebhook) ClassicEnabled() bool { return true }

func (s *HostedControlPlaneWebhook) HypershiftEnabled() bool { return false }

// NewWebhook creates a new webhook
func NewWebhook() *HostedControlPlaneWebhook {
	scheme := runtime.NewScheme()
	err := admissionv1.AddToScheme(scheme)
	if err != nil {
		log.Error(err, "Fail adding admissionsv1 scheme to HostedControlPlaneWebhook")
		os.Exit(1)
	}
	return &HostedControlPlaneWebhook{
		s: *scheme,
	}
}
-------------------------------------------------------------------------------- /pkg/syncset/syncsetbylabelselector.go: --------------------------------------------------------------------------------
// Package syncset provides a type to map LabelSelectors to arbitrary objects
// and render the minimal set of SelectorSyncSets based on the LabelSelectors.
// The idea is to use it as a replacement for map[metav1.LabelSelector]runtime.RawExtension.
// A map cannot be used because metav1.LabelSelector cannot be used as a key in a map.
// This implementation uses reflect.DeepEqual to compare map keys.
6 | package syncset 7 | 8 | import ( 9 | "encoding/json" 10 | "fmt" 11 | "os" 12 | "reflect" 13 | 14 | admissionregv1 "k8s.io/api/admissionregistration/v1" 15 | v1 "k8s.io/api/apps/v1" 16 | 17 | hivev1 "github.com/openshift/hive/apis/hive/v1" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | "k8s.io/apimachinery/pkg/runtime" 20 | ) 21 | 22 | // SyncSetResourcesByLabelSelector is a mapping data structure. 23 | // It uses metav1.LabelSelector as key and runtime.RawExtension as value. 24 | // The builtin map type cannot be used because metav1.LabelSelector cannot be used as key. 25 | type SyncSetResourcesByLabelSelector struct { 26 | entries []mapEntry 27 | } 28 | 29 | type mapEntry struct { 30 | key metav1.LabelSelector 31 | values []runtime.RawExtension 32 | } 33 | 34 | // Add adds a resources to a SyncSetResourcesByLabelSelector object 35 | func (s *SyncSetResourcesByLabelSelector) Add(key metav1.LabelSelector, object runtime.RawExtension) { 36 | existingEntry := s.Get(key) 37 | 38 | if existingEntry != nil { 39 | existingEntry.values = append(existingEntry.values, object) 40 | return 41 | } 42 | 43 | s.entries = append(s.entries, mapEntry{key, []runtime.RawExtension{object}}) 44 | } 45 | 46 | // Get returns a single entry based on the passed key. 
If none exists, it returns nil 47 | func (s *SyncSetResourcesByLabelSelector) Get(key metav1.LabelSelector) *mapEntry { 48 | for i, entry := range s.entries { 49 | if reflect.DeepEqual(entry.key, key) { 50 | return &s.entries[i] 51 | } 52 | } 53 | return nil 54 | } 55 | 56 | // RenderSelectorSyncSets renders a minimal set of SelectorSyncSets based on the LabelSelectors 57 | // existing in the SyncSetResourcesByLabelSelector object 58 | func (s *SyncSetResourcesByLabelSelector) RenderSelectorSyncSets(labels map[string]string) []runtime.RawExtension { 59 | sss := []runtime.RawExtension{} 60 | for i, entry := range s.entries { 61 | sss = append(sss, runtime.RawExtension{ 62 | Raw: Encode(createSelectorSyncSet( 63 | fmt.Sprintf("managed-cluster-validating-webhooks-%d", i), 64 | entry.values, 65 | entry.key, 66 | labels, 67 | ), 68 | ), 69 | }) 70 | } 71 | return sss 72 | } 73 | 74 | func createSelectorSyncSet(name string, resources []runtime.RawExtension, selector metav1.LabelSelector, labels map[string]string) *hivev1.SelectorSyncSet { 75 | return &hivev1.SelectorSyncSet{ 76 | TypeMeta: metav1.TypeMeta{ 77 | Kind: "SelectorSyncSet", 78 | APIVersion: "hive.openshift.io/v1", 79 | }, 80 | ObjectMeta: metav1.ObjectMeta{ 81 | Name: name, 82 | Labels: labels, 83 | }, 84 | Spec: hivev1.SelectorSyncSetSpec{ 85 | SyncSetCommonSpec: hivev1.SyncSetCommonSpec{ 86 | ResourceApplyMode: hivev1.SyncResourceApplyMode, 87 | Resources: resources, 88 | }, 89 | ClusterDeploymentSelector: selector, 90 | }, 91 | } 92 | } 93 | 94 | func Encode(obj interface{}) []byte { 95 | o, err := json.Marshal(obj) 96 | if err != nil { 97 | fmt.Printf("Error encoding %+v\n", obj) 98 | os.Exit(1) 99 | } 100 | return o 101 | } 102 | 103 | // This is needed to override the omitempty on serviceAccount and serviceAccountName 104 | // which otherwise means we can't nullify them in the SelectorSyncSet 105 | func EncodeAndFixDaemonset(ds *v1.DaemonSet) ([]byte, error) { 106 | 107 | // Convert to json 108 | o, err 
:= json.Marshal(ds) 109 | 110 | // explicitly set serviceAccount / serviceAccountName to emptystring 111 | var decoded interface{} 112 | json.Unmarshal(o, &decoded) 113 | 114 | // set the serviceAccount/serviceAccountName to emptystring 115 | // only empty-set serviceAccountName if it's not already defined 116 | if len(ds.Spec.Template.Spec.ServiceAccountName) == 0 { 117 | decoded.(map[string]interface{})["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["serviceAccountName"] = "" 118 | } 119 | // serviceAccount is deprecated 120 | decoded.(map[string]interface{})["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["serviceAccount"] = "" 121 | 122 | // convert back to json 123 | r, err := json.Marshal(decoded) 124 | if err != nil { 125 | return nil, fmt.Errorf("Error encoding %+v\n", decoded) 126 | } 127 | return r, nil 128 | 129 | } 130 | 131 | func EncodeValidatingAndFixCA(vw admissionregv1.ValidatingWebhookConfiguration) ([]byte, error) { 132 | 133 | // Get the existing caBundle value 134 | if len(vw.Webhooks) < 1 { 135 | return nil, fmt.Errorf("Require at least one webhook") 136 | } 137 | caBundleValue := string(vw.Webhooks[0].ClientConfig.CABundle) 138 | 139 | // Convert to json 140 | o, err := json.Marshal(vw) 141 | if caBundleValue == "" { 142 | return o, err 143 | } 144 | 145 | // fix broken CABundle setting here 146 | var decoded interface{} 147 | json.Unmarshal(o, &decoded) 148 | 149 | // set the CA 150 | decoded.(map[string]interface{})["webhooks"].([]interface{})[0].(map[string]interface{})["clientConfig"].(map[string]interface{})["caBundle"] = caBundleValue 151 | 152 | // convert back to json 153 | r, err := json.Marshal(decoded) 154 | if err != nil { 155 | return nil, fmt.Errorf("Error encoding %+v\n", decoded) 156 | } 157 | return r, nil 158 | } 159 | 160 | func EncodeMutatingAndFixCA(vw admissionregv1.MutatingWebhookConfiguration) ([]byte, error) 
{ 161 | 162 | // Get the existing caBundle value 163 | if len(vw.Webhooks) < 1 { 164 | return nil, fmt.Errorf("Require at least one webhook") 165 | } 166 | caBundleValue := string(vw.Webhooks[0].ClientConfig.CABundle) 167 | 168 | // Convert to json 169 | o, err := json.Marshal(vw) 170 | if caBundleValue == "" { 171 | return o, err 172 | } 173 | 174 | // fix broken CABundle setting here 175 | var decoded interface{} 176 | json.Unmarshal(o, &decoded) 177 | 178 | // set the CA 179 | decoded.(map[string]interface{})["webhooks"].([]interface{})[0].(map[string]interface{})["clientConfig"].(map[string]interface{})["caBundle"] = caBundleValue 180 | 181 | // convert back to json 182 | r, err := json.Marshal(decoded) 183 | if err != nil { 184 | return nil, fmt.Errorf("Error encoding %+v\n", decoded) 185 | } 186 | return r, nil 187 | } 188 | --------------------------------------------------------------------------------