├── .gitattributes ├── .github ├── dependabot.yml └── renovate.json ├── .gitignore ├── .tekton ├── managed-cluster-validating-webhooks-e2e-pull-request.yaml ├── managed-cluster-validating-webhooks-e2e-push.yaml ├── managed-cluster-validating-webhooks-pull-request.yaml └── managed-cluster-validating-webhooks-push.yaml ├── CLAUDE.md ├── LICENSE ├── Makefile ├── OWNERS ├── OWNERS_ALIASES ├── README.md ├── boilerplate ├── _data │ ├── backing-image-tag │ └── last-boilerplate-commit ├── _lib │ ├── boilerplate-commit │ ├── boilerplate.mk │ ├── common.sh │ ├── container-make │ ├── freeze-check │ ├── release.sh │ ├── subscriber │ ├── subscriber-propose │ ├── subscriber-propose-update │ ├── subscriber-report │ ├── subscriber-report-onboarding │ ├── subscriber-report-pr │ ├── subscriber-report-release │ └── subscriber.sh ├── generated-includes.mk ├── openshift │ └── golang-osd-e2e │ │ ├── OWNERS │ │ ├── README.md │ │ ├── e2e-template.yml │ │ ├── project.mk │ │ ├── standard.mk │ │ └── update ├── update └── update.cfg ├── build ├── Dockerfile ├── bin │ ├── entrypoint │ └── user_setup ├── build_deploy.sh ├── build_push.sh ├── build_push_package.sh ├── pr_check.sh ├── resources.go └── selectorsyncset.yaml ├── cmd ├── fips.go └── main.go ├── config ├── config.go └── package │ ├── managed-cluster-validating-webhooks-package.Containerfile │ ├── manifest.yaml │ └── resources.yaml.gotmpl ├── designs └── validating_admission_policy.md ├── docs ├── hypershift.md ├── webhooks-short.json └── webhooks.json ├── go.mod ├── go.sum ├── hack ├── documentation │ └── document.go ├── templates │ └── 00-managed-cluster-validating-webhooks-hs.SelectorSyncSet.yaml.tmpl └── test.sh ├── pkg ├── config │ ├── config.go │ ├── generate │ │ └── namespaces.go │ └── namespaces.go ├── dispatcher │ └── dispatcher.go ├── helpers │ ├── response.go │ └── response_test.go ├── k8sutil │ └── k8sutil.go ├── localmetrics │ └── localmetrics.go ├── syncset │ └── syncsetbylabelselector.go ├── testutils │ └── testutils.go 
└── webhooks │ ├── add_clusterlogging.go │ ├── add_clusterrole.go │ ├── add_clusterrolebinding.go │ ├── add_customresourcedefinitions.go │ ├── add_hcpnamespace.go │ ├── add_hiveownership.go │ ├── add_hostedcluster.go │ ├── add_hostedcontrolplane.go │ ├── add_imagecontentpolicies.go │ ├── add_ingressconfig_hook.go │ ├── add_ingresscontroller.go │ ├── add_manifestworks.go │ ├── add_namespace_hook.go │ ├── add_networkpolicy.go │ ├── add_node.go │ ├── add_pod.go │ ├── add_podimagespec.go │ ├── add_prometheusrule.go │ ├── add_regularuser.go │ ├── add_scc.go │ ├── add_sdnmigration.go │ ├── add_service_hook.go │ ├── add_serviceaccount.go │ ├── add_techpreviewnoupgrade.go │ ├── clusterlogging │ ├── clusterlogging.go │ └── clusterlogging_test.go │ ├── clusterrole │ ├── clusterrole.go │ └── clusterrole_test.go │ ├── clusterrolebinding │ ├── clusterrolebinding.go │ └── clusterrolebinding_test.go │ ├── customresourcedefinitions │ ├── customresourcedefinitions.go │ └── customresourcedefinitions_test.go │ ├── hcpnamespace │ ├── hcpnamespace.go │ └── hcpnamespace_test.go │ ├── hiveownership │ ├── hiveownership.go │ └── hiveownership_test.go │ ├── hostedcluster │ ├── hostedcluster.go │ └── hostedcluster_test.go │ ├── hostedcontrolplane │ ├── hostedcontrolplane.go │ └── hostedcontrolplane_test.go │ ├── imagecontentpolicies │ ├── imagecontentpolicies.go │ └── imagecontentpolicies_test.go │ ├── ingressconfig │ ├── ingressconfig.go │ └── ingressconfig_test.go │ ├── ingresscontroller │ ├── ingresscontroller.go │ └── ingresscontroller_test.go │ ├── manifestworks │ ├── manifestworks.go │ └── manifestworks_test.go │ ├── namespace │ ├── namespace.go │ └── namespace_test.go │ ├── networkpolicies │ ├── networkpolicies.go │ └── networkpolicies_test.go │ ├── node │ ├── node.go │ └── node_test.go │ ├── pod │ ├── pod.go │ └── pod_test.go │ ├── podimagespec │ ├── podimagespec.go │ └── podimagespec_test.go │ ├── prometheusrule │ ├── prometheusrule.go │ └── prometheusrule_test.go │ ├── register.go 
│ ├── regularuser │ └── common │ │ ├── regularuser.go │ │ └── regularuser_test.go │ ├── scc │ ├── scc.go │ └── scc_test.go │ ├── sdnmigration │ ├── sdnmigration.go │ └── sdnmigration_test.go │ ├── service │ ├── service.go │ └── service_test.go │ ├── serviceaccount │ ├── serviceaccount.go │ └── serviceaccount_test.go │ ├── techpreviewnoupgrade │ ├── techpreviewnoupgrade.go │ └── techpreviewnoupgrade_test.go │ └── utils │ ├── utils.go │ └── utils_test.go └── test └── e2e ├── Dockerfile ├── README.md ├── e2e-template.yml ├── validation_webhook_runner_test.go └── validation_webhook_tests.go /.gitattributes: -------------------------------------------------------------------------------- 1 | ### BEGIN BOILERPLATE GENERATED -- DO NOT EDIT ### 2 | ### This block must be the last thing in your ### 3 | ### .gitattributes file; otherwise the 'validate' ### 4 | ### CI check will fail. ### 5 | # Used to ensure nobody mucked with boilerplate files. 6 | boilerplate/_lib/freeze-check linguist-generated=false 7 | # Show the boilerplate commit hash update. It's only one line anyway. 8 | boilerplate/_data/last-boilerplate-commit linguist-generated=false 9 | # Used by freeze-check. Good place for attackers to inject badness. 
10 | boilerplate/update linguist-generated=false 11 | # Make sure attackers can't hide changes to this configuration 12 | .gitattributes linguist-generated=false 13 | ### END BOILERPLATE GENERATED ### 14 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "docker" 4 | directory: "/build" 5 | labels: 6 | - "area/dependency" 7 | - "ok-to-test" 8 | schedule: 9 | interval: "weekly" 10 | ignore: 11 | - dependency-name: "app-sre/boilerplate" 12 | # don't upgrade boilerplate via these means 13 | - dependency-name: "openshift/origin-operator-registry" 14 | # don't upgrade origin-operator-registry via these means 15 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "github>openshift/boilerplate//.github/renovate.json" 5 | ] 6 | } 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .coverage 2 | /build/_output 3 | *.out 4 | /coverage.txt 5 | /.vscode 6 | *.code-workspace 7 | .idea 8 | -------------------------------------------------------------------------------- /CLAUDE.md: -------------------------------------------------------------------------------- 1 | # CLAUDE.md 2 | 3 | This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. 4 | 5 | ## Project Overview 6 | 7 | This is a Go-based Kubernetes validating/mutating webhooks framework for OpenShift Dedicated (OSD) and Red Hat OpenShift Service on AWS (ROSA) managed clusters. 
It implements 20+ webhooks that enforce security policies and operational constraints on managed clusters. 8 | 9 | ## Key Commands 10 | 11 | ```bash 12 | # Development 13 | make test # Run tests and webhook validation 14 | make build # Build binary 15 | make build-image # Build container image 16 | make build-package-image # Build PKO package for HyperShift 17 | 18 | # Resource Generation 19 | make syncset # Generate SelectorSyncSet YAML for Classic clusters 20 | make package # Generate Package Operator resources for HyperShift 21 | make docs # Generate webhook documentation 22 | make generate # Update namespace lists from ConfigMaps 23 | 24 | # Local Testing 25 | make test-webhook WEBHOOK=namespace # Test specific webhook 26 | go test ./pkg/webhooks/namespace/... # Run webhook unit tests 27 | ``` 28 | 29 | ## Architecture Overview 30 | 31 | ### Webhook Framework Design 32 | - **Interface-Based**: All webhooks implement the `Webhook` interface in `pkg/webhooks/register.go` 33 | - **Factory Pattern**: Webhooks register via `init()` functions in `add_*` files 34 | - **Centralized Dispatcher**: Single HTTP server routes requests to webhooks based on URI paths 35 | - **Plugin System**: Each webhook is a self-contained module in `pkg/webhooks/*/` 36 | 37 | ### Deployment Models 38 | - **OSD/ROSA Classic**: Deployed as DaemonSet via SelectorSyncSet to master nodes 39 | - **ROSA HyperShift**: Deployed via Package Operator (PKO) to hosted control plane 40 | 41 | ### Core Components 42 | - **Main Application** (`cmd/main.go`): HTTP server with TLS, metrics, and webhook routing 43 | - **Dispatcher** (`pkg/dispatcher/`): Thread-safe request routing and response handling 44 | - **Individual Webhooks** (`pkg/webhooks/*/`): Modular webhook implementations 45 | - **Configuration** (`pkg/config/`): Namespace protection lists and build-time config 46 | 47 | ## Adding New Webhooks 48 | 49 | 1. Create webhook directory: `pkg/webhooks/mywebhook/` 50 | 2. 
Implement the `Webhook` interface with required methods: 51 | - `Validate()`: Core webhook logic 52 | - `Authorized()`: Authorization checks 53 | - `GetURI()`: Unique webhook path 54 | - `Rules()`: AdmissionRules for K8s 55 | 3. Create registration file: `pkg/webhooks/add_mywebhook.go` 56 | 4. Add to Makefile webhook lists if needed 57 | 5. Update documentation with `make docs` 58 | 59 | ## Testing Guidelines 60 | 61 | - **Unit Tests**: Use `pkg/testutils` for HTTP integration testing 62 | - **Authorization Testing**: Test all user categories (cluster-admin, SRE, regular users) 63 | - **Local Testing**: Follow README.md guide for testing on live clusters 64 | - **Webhook Validation**: `make test` validates URI uniqueness and basic functionality 65 | 66 | ## Security Architecture 67 | 68 | ### Authorization Layers 69 | 1. **Cluster Admins**: `kube:admin`, `system:admin`, `backplane-cluster-admin` 70 | 2. **SRE Groups**: `system:serviceaccounts:openshift-backplane-srep` 71 | 3. **Privileged ServiceAccounts**: System service accounts with regex matching 72 | 4. **Layered Product Admins**: Special access for `redhat-.*` namespaces 73 | 5. 
- **Tekton Pipelines**: 4 configurations for PR/push scenarios in `.tekton/`
================================ NOTICE ==================================== 2 | # This file is sourced from https://github.com/openshift/boilerplate 3 | # However, this repository is not currently subscribed to boilerplate, so manual updates are required 4 | # See the OWNERS_ALIASES docs: https://git.k8s.io/community/contributors/guide/owners.md#OWNERS_ALIASES 5 | # ============================================================================= 6 | aliases: 7 | srep-functional-team-aurora: 8 | - abyrne55 9 | - dakotalongRH 10 | - joshbranham 11 | - luis-falcon 12 | - reedcort 13 | srep-functional-team-fedramp: 14 | - tonytheleg 15 | - theautoroboto 16 | - rhdedgar 17 | - katherinelc321 18 | - rojasreinold 19 | - fsferraz-rh 20 | srep-functional-team-hulk: 21 | - a7vicky 22 | - ravitri 23 | - shitaljante 24 | - devppratik 25 | - Tafhim 26 | - tkong-redhat 27 | - TheUndeadKing 28 | - vaidehi411 29 | - chamalabey 30 | srep-functional-team-orange: 31 | - bergmannf 32 | - Makdaam 33 | - Nikokolas3270 34 | - RaphaelBut 35 | - MateSaary 36 | - rolandmkunkel 37 | - petrkotas 38 | - zmird-r 39 | - evlin-rh 40 | - hectorakemp 41 | srep-functional-team-rocket: 42 | - aliceh 43 | - anispate 44 | - clcollins 45 | - Mhodesty 46 | - nephomaniac 47 | - tnierman 48 | srep-functional-team-security: 49 | - jaybeeunix 50 | - sam-nguyen7 51 | - wshearn 52 | - dem4gus 53 | - npecka 54 | - pshickeydev 55 | - casey-williams-rh 56 | - boranx 57 | srep-functional-team-thor: 58 | - bmeng 59 | - diakovnec 60 | - MitaliBhalla 61 | - feichashao 62 | - samanthajayasinghe 63 | - xiaoyu74 64 | - Dee-6777 65 | - Tessg22 66 | - smarthall 67 | srep-infra-cicd: 68 | - mmazur 69 | - mrsantamaria 70 | - ritmun 71 | - jbpratt 72 | - yiqinzhang 73 | srep-functional-leads: 74 | - abyrne55 75 | - clcollins 76 | - Nikokolas3270 77 | - theautoroboto 78 | - smarthall 79 | - sam-nguyen7 80 | - ravitri 81 | srep-team-leads: 82 | - rafael-azevedo 83 | - iamkirkbater 84 | - rogbas 85 | - dustman9000 86 | - 
wanghaoran1988 87 | - bng0y 88 | - bmeng 89 | - typeid 90 | sre-group-leads: 91 | - apahim 92 | - maorfr 93 | - rogbas 94 | srep-architects: 95 | - jharrington22 96 | - cblecker 97 | -------------------------------------------------------------------------------- /boilerplate/_data/backing-image-tag: -------------------------------------------------------------------------------- 1 | image-v8.2.0 2 | -------------------------------------------------------------------------------- /boilerplate/_data/last-boilerplate-commit: -------------------------------------------------------------------------------- 1 | 27b681eaa783ae7183c53546a7194162a3041fa0 2 | -------------------------------------------------------------------------------- /boilerplate/_lib/boilerplate.mk: -------------------------------------------------------------------------------- 1 | .PHONY: boilerplate-commit 2 | boilerplate-commit: 3 | @boilerplate/_lib/boilerplate-commit 4 | 5 | .PHONY: boilerplate-freeze-check 6 | boilerplate-freeze-check: 7 | @boilerplate/_lib/freeze-check 8 | -------------------------------------------------------------------------------- /boilerplate/_lib/container-make: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ "$1" == "-h"* ]] || [[ "$1" == "--h"* ]]; then 4 | echo "Usage: $0 {arguments to the real 'make'}" 5 | echo "Runs 'make' in the boilerplate backing container." 6 | echo "If the command fails, starts a shell in the container so you can debug." 7 | exit -1 8 | fi 9 | 10 | source ${0%/*}/common.sh 11 | 12 | CONTAINER_ENGINE="${CONTAINER_ENGINE:-$(command -v podman || command -v docker)}" 13 | [[ -n "$CONTAINER_ENGINE" ]] || err "Couldn't find a container engine. Are you already in a container?" 14 | 15 | # Make sure the mount inside the container is named in such a way that 16 | # - openapi-gen (which relies on GOPATH) produces absolute paths; and 17 | # - other go-ish paths are writeable, e.g. 
for `go mod download`. 18 | CONTAINER_MOUNT=/go/src/$(repo_import $REPO_ROOT) 19 | 20 | # First set up a detached container with the repo mounted. 21 | banner "Starting the container" 22 | CE_OPTS="--platform=linux/amd64" 23 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]]; then 24 | CE_OPTS="${CE_OPTS} --userns keep-id" 25 | fi 26 | if [[ "${CONTAINER_ENGINE##*/}" == "podman" ]] && [[ $OSTYPE == *"linux"* ]]; then 27 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT:Z" 28 | else 29 | CE_OPTS="${CE_OPTS} -v $REPO_ROOT:$CONTAINER_MOUNT" 30 | fi 31 | container_id=$($CONTAINER_ENGINE run -d ${CE_OPTS} $IMAGE_PULL_PATH sleep infinity) 32 | 33 | if [[ $? -ne 0 ]] || [[ -z "$container_id" ]]; then 34 | err "Couldn't start detached container" 35 | fi 36 | 37 | # Now run our `make` command in it with the right UID and working directory 38 | args="exec -it -u $(id -u):0 -w $CONTAINER_MOUNT $container_id" 39 | banner "Running: make $@" 40 | $CONTAINER_ENGINE $args make "$@" 41 | rc=$? 42 | 43 | # If it failed, drop into the container in a shell 44 | if [[ $rc -ne 0 ]]; then 45 | banner "The 'make' command failed! Starting a shell in the container for debugging. Just 'exit' when done." 46 | $CONTAINER_ENGINE $args /bin/bash 47 | fi 48 | 49 | # Finally, remove the container 50 | banner "Cleaning up the container" 51 | $CONTAINER_ENGINE rm -f $container_id >/dev/null 52 | -------------------------------------------------------------------------------- /boilerplate/_lib/freeze-check: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # NOTE: For security reasons, everything imported or invoked (even 4 | # indirectly) by this script should be audited for vulnerabilities and 5 | # explicitly excluded from `linguist-generated` in the consuming 6 | # repository's .gitattributes. 
In other words, we want PRs to show 7 | # deltas to this script and all its dependencies by default so that 8 | # attempts to inject or circumvent code are visible. 9 | 10 | set -e 11 | 12 | REPO_ROOT=$(git rev-parse --show-toplevel) 13 | # Hardcoded rather than sourced to reduce attack surface. 14 | BOILERPLATE_GIT_REPO=https://github.com/openshift/boilerplate.git 15 | 16 | # Validate that no subscribed boilerplate artifacts have been changed. 17 | # PR checks may wish to gate on this. 18 | 19 | # This works by grabbing the commit hash of the boilerplate repository 20 | # at which the last update was applied, running the main `update` driver 21 | # against that, and failing if there's a resulting diff. 22 | 23 | # If we can't tell what that commit was, we must assume this is the 24 | # first update, and we'll (noisily) "succeed". 25 | 26 | # Note that this ought to work when you've just committed an update, 27 | # even if you've changed your update.cfg beforehand. We're basically 28 | # making sure you didn't muck with anything after updating. 29 | 30 | # For this to work, you have to be starting from a clean repository 31 | # state (any changes committed). 32 | # TODO(efried): This is not ideal -- it would be nice if I could check 33 | # this before committing my changes -- but how would that work? Diff to 34 | # a file, create a temporary commit, run the rest, remove the commit, 35 | # and reapply the diff? Messy and error-prone -- and I would be 36 | # seriously ticked off if something went wrong and lost my in-flight 37 | # changes. 38 | if ! [ -z "$(git status --porcelain -- ':!build/Dockerfile*')" ]; then 39 | echo "Can't validate boilerplate in a dirty repository. Please commit your changes and try again." >&2 40 | exit 1 41 | fi 42 | 43 | # We glean the last boilerplate commit from the 44 | # last-boilerplate-commit file, which gets laid down by the main 45 | # `update` driver each time it runs. 
46 | LBCF=${REPO_ROOT}/boilerplate/_data/last-boilerplate-commit 47 | if ! [[ -f "$LBCF" ]]; then 48 | echo "Couldn't discover last boilerplate commit! Assuming you're bootstrapping." 49 | exit 0 50 | fi 51 | LBC=$(cat $LBCF) 52 | 53 | # Download just that commit 54 | echo "Fetching $LBC from $BOILERPLATE_GIT_REPO" 55 | # boilerplate/update cleans up this temp dir 56 | TMPD=$(mktemp -d) 57 | cd $TMPD 58 | git init 59 | # TODO(efried): DRY this remote. Make it configurable? 60 | git remote add origin $BOILERPLATE_GIT_REPO 61 | git fetch origin $(cat $LBCF) --tags 62 | git reset --hard FETCH_HEAD 63 | 64 | # Now invoke the update script, overriding the source repository we've 65 | # just downloaded at the appropriate commit. 66 | # We invoke the script explicitly rather than via the make target to 67 | # close a security hole whereby the latter is overridden. 68 | echo "Running update" 69 | cd $REPO_ROOT 70 | BOILERPLATE_GIT_REPO="${TMPD}" boilerplate/update 71 | 72 | # Okay, if anything has changed, that's bad. 73 | if [[ $(git status --porcelain -- ':!build/Dockerfile*' | wc -l) -ne 0 ]]; then 74 | echo "Your boilerplate is dirty!" >&2 75 | git status --porcelain -- ':!build/Dockerfile*' 76 | exit 1 77 | fi 78 | 79 | echo "Your boilerplate is clean!" 80 | exit 0 81 | -------------------------------------------------------------------------------- /boilerplate/_lib/release.sh: -------------------------------------------------------------------------------- 1 | # Helpers and variables for dealing with openshift/release 2 | 3 | # NOTE: This library is sourced from user-run scripts. It should not be 4 | # sourced in CI, as it relies on git config that's not necessarily 5 | # present there. 6 | 7 | RELEASE_REPO=openshift/release 8 | 9 | ## Information about the boilerplate consumer 10 | # E.g. "openshift/my-wizbang-operator" 11 | CONSUMER=$(repo_name .) 12 | [[ -z "$CONSUMER" ]] && err " 13 | Failed to determine current repository name" 14 | # 15 | # E.g. 
"openshift" 16 | CONSUMER_ORG=${CONSUMER%/*} 17 | [[ -z "$CONSUMER_ORG" ]] && err " 18 | Failed to determine consumer org" 19 | # 20 | # E.g. "my-wizbang-operator" 21 | CONSUMER_NAME=${CONSUMER#*/} 22 | [[ -z "$CONSUMER_NAME" ]] && err " 23 | Failed to determine consumer name" 24 | # 25 | # E.g. "master" 26 | # This will produce something like refs/remotes/origin/master 27 | DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/upstream/HEAD 2>/dev/null || git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null || echo defaulting/to/master) 28 | # Strip off refs/remotes/{upstream|origin}/ 29 | DEFAULT_BRANCH=${DEFAULT_BRANCH##*/} 30 | [[ -z "$DEFAULT_BRANCH" ]] && err " 31 | Failed to determine default branch name" 32 | 33 | ## release_process_args "$@" 34 | # 35 | # This is for use by commands expecting one optional argument which is 36 | # the file system path to a clone of the $RELEASE_REPO. 37 | # 38 | # Will invoke `usage` -- which must be defined by the caller -- if 39 | # the wrong number of arguments are received, or if the single argument 40 | # is `help` or a flag. 41 | # 42 | # If exactly one argument is specified and it is valid, it is assigned 43 | # to the global RELEASE_CLONE variable. 44 | release_process_args() { 45 | if [[ $# -eq 1 ]]; then 46 | # Special cases for usage queries 47 | if [[ "$1" == '-'* ]] || [[ "$1" == help ]]; then 48 | usage 49 | fi 50 | 51 | [[ -d $1 ]] || err " 52 | $1: Not a directory." 53 | 54 | [[ $(repo_name $1) == "$RELEASE_REPO" ]] || err " 55 | $1 is not a clone of $RELEASE_REPO; or its 'origin' remote is not set properly." 
56 | 57 | # Got a usable clone of openshift/release 58 | RELEASE_CLONE="$1" 59 | 60 | elif [[ $# -ne 0 ]]; then 61 | usage 62 | fi 63 | } 64 | 65 | ## release_validate_invocation 66 | # 67 | # Make sure we were called from a reasonable place, that being: 68 | # - A boilerplate consumer 69 | # - ...that's actually subscribed to a convention 70 | # - ...containing the script being invoked 71 | release_validate_invocation() { 72 | # Make sure we were invoked from a boilerplate consumer. 73 | [[ -z "$CONVENTION_NAME" ]] && err " 74 | $cmd must be invoked from a consumer of an appropriate convention. Where did you get this script from?" 75 | # Or at least not from boilerplate itself 76 | [[ "$CONSUMER" == "openshift/boilerplate" ]] && err " 77 | $cmd must be invoked from a boilerplate consumer, not from boilerplate itself." 78 | 79 | [[ -s $CONVENTION_ROOT/_data/last-boilerplate-commit ]] || err " 80 | $cmd must be invoked from a boilerplate consumer!" 81 | 82 | grep -E -q "^$CONVENTION_NAME(\s.*)?$" $CONVENTION_ROOT/update.cfg || err " 83 | $CONSUMER is not subscribed to $CONVENTION_NAME!" 84 | } 85 | 86 | ## release_prep_clone 87 | # 88 | # If $RELEASE_CLONE is already set: 89 | # - It should represent a directory containing a clean checkout of the 90 | # release repository; otherwise we error. 91 | # - We checkout and pull master. 92 | # Otherwise: 93 | # - We clone the release repo to a temporary directory. 94 | # - We set the $RELEASE_CLONE global variable to point to that 95 | # directory. 96 | release_prep_clone() { 97 | # If a release repo clone wasn't specified, create one 98 | if [[ -z "$RELEASE_CLONE" ]]; then 99 | RELEASE_CLONE=$(mktemp -dt openshift_release_XXXXXXX) 100 | git clone --depth=1 git@github.com:${RELEASE_REPO}.git $RELEASE_CLONE 101 | else 102 | [[ -z "$(git -C $RELEASE_CLONE status --porcelain)" ]] || err " 103 | Your release clone must start clean." 
104 | # These will blow up if it's misconfigured 105 | git -C $RELEASE_CLONE checkout master 106 | git -C $RELEASE_CLONE pull 107 | fi 108 | } 109 | 110 | ## release_done_msg BRANCH 111 | # 112 | # Print exit instructions for submitting the release PR. 113 | # BRANCH is a suggested branch name. 114 | release_done_msg() { 115 | echo 116 | git status 117 | 118 | cat < $TMPD/$f 46 | echo $TMPD/$f 47 | return 48 | fi 49 | done 50 | } 51 | 52 | ## expected_prow_config ORG PROJ BRANCH 53 | # 54 | # Prints to stdout the expected prow configuration for the specified 55 | # ORG/PROJ. 56 | expected_prow_config() { 57 | local org=$1 58 | local consumer_name=$2 59 | local branch=$3 60 | # TODO: DRY this with what's in prow-config. 61 | # Do it by making it a template in the convention dir. 62 | cat <&2 146 | continue 147 | fi 148 | to_process[$a]=1 149 | done 150 | 151 | for subscriber in "${!to_process[@]}"; do 152 | [[ "${to_process[$subscriber]}" -eq 1 ]] || continue 153 | echo -n "${subscriber} " 154 | done 155 | } 156 | -------------------------------------------------------------------------------- /boilerplate/generated-includes.mk: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | # This file automatically includes any *.mk files in your subscribed 3 | # conventions. Please ensure your base Makefile includes only this file. 
4 | include boilerplate/_lib/boilerplate.mk 5 | include boilerplate/openshift/golang-osd-e2e/project.mk 6 | include boilerplate/openshift/golang-osd-e2e/standard.mk 7 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/OWNERS: -------------------------------------------------------------------------------- 1 | reviewers: 2 | - srep-infra-cicd 3 | approvers: 4 | - srep-infra-cicd 5 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/README.md: -------------------------------------------------------------------------------- 1 | # Conventions for Ginkgo based e2e tests 2 | 3 | - [Conventions for Ginkgo based e2e tests](#conventions-for-ginkgo-based-e2e-tests) 4 | - [Consuming](#consuming) 5 | - [`make` targets and functions.](#make-targets-and-functions) 6 | - [E2E Test](#e2e-test) 7 | - [Local Testing](#e2e-local-testing) 8 | 9 | ## Consuming 10 | Currently, this convention is only intended for OSD operators. To adopt this convention, your `boilerplate/update.cfg` should include: 11 | 12 | ``` 13 | openshift/golang-osd-e2e 14 | ``` 15 | 16 | ## `make` targets and functions. 
17 | 18 | **Note:** Your repository's main `Makefile` needs to be edited to include: 19 | 20 | ``` 21 | include boilerplate/generated-includes.mk 22 | ``` 23 | 24 | One of the primary purposes of these `make` targets is to allow you to 25 | standardize your prow and app-sre pipeline configurations using the 26 | following: 27 | 28 | ### E2e Test 29 | 30 | | `make` target | Purpose | 31 | |------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| 32 | | `e2e-binary-build` | Compiles ginkgo tests under test/e2e and creates the ginkgo binary. | 33 | | `e2e-image-build-push` | Builds e2e image and pushes to operator's quay repo. Image name is defaulted to -test-harness. Quay repository must be created beforehand. | 34 | 35 | #### E2E Local Testing 36 | 37 | Please follow [this README](https://github.com/openshift/ops-sop/blob/master/v4/howto/osde2e/operator-test-harnesses.md#using-ginkgo) to run your e2e tests locally 38 | 39 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 
2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-${OPERATOR_NAME}-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - --configs 58 | - ${OSDE2E_CONFIGS} 59 | securityContext: 60 | runAsNonRoot: true 61 | allowPrivilegeEscalation: false 62 | capabilities: 63 | drop: ["ALL"] 64 | seccompProfile: 65 | type: RuntimeDefault 66 | env: 67 | - name: AD_HOC_TEST_IMAGES 68 | value: ${TEST_IMAGE}:${IMAGE_TAG} 69 | - name: OCM_CLIENT_ID 70 | value: ${OCM_CLIENT_ID} 71 | - name: OCM_CLIENT_SECRET 72 | value: ${OCM_CLIENT_SECRET} 73 | - name: OCM_CCS 74 | value: ${OCM_CCS} 75 | - name: AWS_ACCESS_KEY_ID 76 | value: ${AWS_ACCESS_KEY_ID} 77 | - name: AWS_SECRET_ACCESS_KEY 78 | value: ${AWS_SECRET_ACCESS_KEY} 79 | - name: CLOUD_PROVIDER_REGION 80 | value: ${CLOUD_PROVIDER_REGION} 81 | - name: 
GCP_CREDS_JSON 82 | value: ${GCP_CREDS_JSON} 83 | - name: LOG_BUCKET 84 | value: ${LOG_BUCKET} 85 | - name: USE_EXISTING_CLUSTER 86 | value: ${USE_EXISTING_CLUSTER} 87 | - name: CAD_PAGERDUTY_ROUTING_KEY 88 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 89 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/project.mk: -------------------------------------------------------------------------------- 1 | # Project specific values 2 | OPERATOR_NAME?=$(shell sed -n 's/.*OperatorName .*"\([^"]*\)".*/\1/p' config/config.go) 3 | 4 | E2E_IMAGE_REGISTRY?=quay.io 5 | E2E_IMAGE_REPOSITORY?=app-sre 6 | E2E_IMAGE_NAME?=$(OPERATOR_NAME)-e2e 7 | 8 | 9 | REGISTRY_USER?=$(QUAY_USER) 10 | REGISTRY_TOKEN?=$(QUAY_TOKEN) 11 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/standard.mk: -------------------------------------------------------------------------------- 1 | # Validate variables in project.mk exist 2 | ifndef OPERATOR_NAME 3 | $(error OPERATOR_NAME is not set; only operators should consume this convention; check project.mk file) 4 | endif 5 | ifndef E2E_IMAGE_REGISTRY 6 | $(error E2E_IMAGE_REGISTRY is not set; check project.mk file) 7 | endif 8 | ifndef E2E_IMAGE_REPOSITORY 9 | $(error E2E_IMAGE_REPOSITORY is not set; check project.mk file) 10 | endif 11 | 12 | # Use current commit as e2e image tag 13 | CURRENT_COMMIT=$(shell git rev-parse --short=7 HEAD) 14 | E2E_IMAGE_TAG=$(CURRENT_COMMIT) 15 | 16 | ### Accommodate docker or podman 17 | # 18 | # The docker/podman creds cache needs to be in a location unique to this 19 | # invocation; otherwise it could collide across jenkins jobs. We'll use 20 | # a .docker folder relative to pwd (the repo root). 
21 | CONTAINER_ENGINE_CONFIG_DIR = .docker 22 | JENKINS_DOCKER_CONFIG_FILE = /var/lib/jenkins/.docker/config.json 23 | export REGISTRY_AUTH_FILE = ${CONTAINER_ENGINE_CONFIG_DIR}/config.json 24 | 25 | # If this configuration file doesn't exist, podman will error out. So 26 | # we'll create it if it doesn't exist. 27 | ifeq (,$(wildcard $(REGISTRY_AUTH_FILE))) 28 | $(shell mkdir -p $(CONTAINER_ENGINE_CONFIG_DIR)) 29 | # Copy the node container auth file so that we get access to the registries the 30 | # parent node has access to 31 | $(shell if test -f $(JENKINS_DOCKER_CONFIG_FILE); then cp $(JENKINS_DOCKER_CONFIG_FILE) $(REGISTRY_AUTH_FILE); fi) 32 | endif 33 | 34 | # ==> Docker uses --config=PATH *before* (any) subcommand; so we'll glue 35 | # that to the CONTAINER_ENGINE variable itself. (NOTE: I tried half a 36 | # dozen other ways to do this. This was the least ugly one that actually 37 | # works.) 38 | ifndef CONTAINER_ENGINE 39 | CONTAINER_ENGINE=$(shell command -v podman 2>/dev/null || echo docker --config=$(CONTAINER_ENGINE_CONFIG_DIR)) 40 | endif 41 | 42 | REGISTRY_USER ?= 43 | REGISTRY_TOKEN ?= 44 | 45 | # TODO: Figure out how to discover this dynamically 46 | OSDE2E_CONVENTION_DIR := boilerplate/openshift/golang-osd-operator-osde2e 47 | 48 | # log into quay.io 49 | .PHONY: container-engine-login 50 | container-engine-login: 51 | @test "${REGISTRY_USER}" != "" && test "${REGISTRY_TOKEN}" != "" || (echo "REGISTRY_USER and REGISTRY_TOKEN must be defined" && exit 1) 52 | mkdir -p ${CONTAINER_ENGINE_CONFIG_DIR} 53 | @${CONTAINER_ENGINE} login -u="${REGISTRY_USER}" -p="${REGISTRY_TOKEN}" quay.io 54 | 55 | ###################### 56 | # Targets used by e2e test suite 57 | ###################### 58 | 59 | # create binary 60 | .PHONY: e2e-binary-build 61 | e2e-binary-build: GOFLAGS_MOD=-mod=mod 62 | e2e-binary-build: GOENV=GOOS=${GOOS} GOARCH=${GOARCH} CGO_ENABLED=0 GOFLAGS="${GOFLAGS_MOD}" 63 | e2e-binary-build: 64 | go mod tidy 65 | go test ./test/e2e -v -c 
--tags=osde2e -o e2e.test 66 | 67 | # push e2e image tagged as latest and as repo commit hash 68 | .PHONY: e2e-image-build-push 69 | e2e-image-build-push: container-engine-login 70 | ${CONTAINER_ENGINE} build --pull -f test/e2e/Dockerfile -t $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) . 71 | ${CONTAINER_ENGINE} tag $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 72 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):$(E2E_IMAGE_TAG) 73 | ${CONTAINER_ENGINE} push $(E2E_IMAGE_REGISTRY)/$(E2E_IMAGE_REPOSITORY)/$(E2E_IMAGE_NAME):latest 74 | -------------------------------------------------------------------------------- /boilerplate/openshift/golang-osd-e2e/update: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | source $CONVENTION_ROOT/_lib/common.sh 6 | 7 | # No PRE 8 | [[ "$1" == "PRE" ]] && exit 0 9 | 10 | # Expect POST 11 | [[ "$1" == "POST" ]] || err "Got a parameter I don't understand: '$1'. Did the infrastructure change?" 
12 | 13 | REPO_ROOT=$(git rev-parse --show-toplevel) 14 | OPERATOR_NAME=$(sed -n 's/.*OperatorName .*=.*"\([^"]*\)".*/\1/p' "${REPO_ROOT}/config/config.go") 15 | E2E_SUITE_DIRECTORY=$REPO_ROOT/test/e2e 16 | 17 | # Update operator name in templates 18 | OPERATOR_UNDERSCORE_NAME=${OPERATOR_NAME//-/_} 19 | OPERATOR_PROPER_NAME=$(echo "$OPERATOR_NAME" | sed 's/-/ /g' | awk '{for(i=1;i<=NF;i++){ $i=toupper(substr($i,1,1)) substr($i,2) }}1') 20 | OPERATOR_NAME_CAMEL_CASE=${OPERATOR_PROPER_NAME// /} 21 | 22 | mkdir -p "${E2E_SUITE_DIRECTORY}" 23 | 24 | E2E_SUITE_BUILDER_IMAGE=registry.access.redhat.com/ubi9/go-toolset:1.24 25 | if [[ -n ${KONFLUX_BUILDS} ]]; then 26 | E2E_SUITE_BUILDER_IMAGE="brew.registry.redhat.io/rh-osbs/openshift-golang-builder:rhel_9_1.24" 27 | fi 28 | 29 | echo "syncing ${E2E_SUITE_DIRECTORY}/Dockerfile" 30 | tee "${E2E_SUITE_DIRECTORY}/Dockerfile" < /(path-to)/kubeconfig 101 | 102 | 5. Run test suite using 103 | 104 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 105 | EOF 106 | 107 | sed -e "s/\${OPERATOR_NAME}/${OPERATOR_NAME}/" $(dirname $0)/e2e-template.yml >"${E2E_SUITE_DIRECTORY}/e2e-template.yml" 108 | 109 | # todo: remove after file is renamed in ALL consumer repos 110 | if [ -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" ]; then 111 | rm -f "${E2E_SUITE_DIRECTORY}/test-harness-template.yml" 112 | fi 113 | -------------------------------------------------------------------------------- /boilerplate/update.cfg: -------------------------------------------------------------------------------- 1 | openshift/golang-osd-e2e 2 | -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE=registry.access.redhat.com/ubi9/go-toolset:1.24 2 | FROM ${BASE_IMAGE} AS builder 3 | 4 | WORKDIR /opt/app-root/src 5 | COPY go.mod go.sum ./ 6 | RUN go mod 
download 7 | COPY . . 8 | RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=1 GOEXPERIMENT=boringcrypto GOFLAGS=-mod=mod \ 9 | go build -buildvcs=false -gcflags="all=-trimpath=/opt/app-root/src" -asmflags="all=-trimpath=/opt/app-root/src" -tags="fips_enabled" \ 10 | -o webhooks ./cmd 11 | 12 | #### 13 | FROM registry.access.redhat.com/ubi9/ubi-minimal:latest 14 | 15 | ENV USER_UID=1001 \ 16 | USER_NAME=webhooks 17 | 18 | COPY --from=builder /opt/app-root/src/webhooks /usr/local/bin/ 19 | 20 | COPY build/bin /usr/local/bin 21 | RUN /usr/local/bin/user_setup 22 | 23 | ENTRYPOINT ["/usr/local/bin/entrypoint"] 24 | 25 | USER ${USER_UID} 26 | 27 | LABEL io.openshift.managed.name="managed-cluster-validating-webhooks" \ 28 | io.openshift.managed.description="Validating Webhooks for Openshift Dedicated" \ 29 | com.redhat.component="managed-cluster-validating-webhooks-container" \ 30 | name="managed-cluster-validating-webhooks" \ 31 | version="1.0" \ 32 | release="1" \ 33 | summary="Validating Webhooks for OpenShift Dedicated" \ 34 | description="A framework supporting validating admission webhooks for OpenShift" \ 35 | io.k8s.description="A framework supporting validating admission webhooks for OpenShift" \ 36 | io.k8s.display-name="Managed Cluster Validating Webhooks" \ 37 | io.openshift.tags="openshift,webhooks,validation" 38 | -------------------------------------------------------------------------------- /build/bin/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | # This is documented here: 4 | # https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines 5 | 6 | if ! 
whoami &>/dev/null; then 7 | if [ -w /etc/passwd ]; then 8 | echo "${USER_NAME:-webhooks}:x:$(id -u):$(id -g):${USER_NAME:-webhooks} user:${HOME}:/sbin/nologin" >> /etc/passwd 9 | fi 10 | fi 11 | app="${1}" 12 | if [[ -z $app ]]; then 13 | echo "First parameter to entrypoint should be webhooks or injector" 14 | exit 1 15 | fi 16 | 17 | shift 18 | exec /usr/local/bin/$app $@ 19 | -------------------------------------------------------------------------------- /build/bin/user_setup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | # ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) 5 | mkdir -p ${HOME} 6 | chown ${USER_UID}:0 ${HOME} 7 | chmod ug+rwx ${HOME} 8 | 9 | # runtime user will need to be able to self-insert in /etc/passwd 10 | chmod g+rw /etc/passwd 11 | 12 | # no need for this script to remain in the image after running 13 | rm $0 -------------------------------------------------------------------------------- /build/build_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # AppSRE team CD 4 | 5 | set -exv 6 | 7 | # TODO: Invoke this make target directly from appsre ci-int and scrap this file 8 | make -C $(dirname $0)/../ build-push 9 | make -C $(dirname $0)/../ build-push-package 10 | -------------------------------------------------------------------------------- /build/build_push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | usage() { 4 | echo "Usage: $0 IMAGE_URI" >&2 5 | exit 1 6 | } 7 | 8 | ## image_exists_in_repo IMAGE_URI 9 | # 10 | # Checks whether IMAGE_URI -- e.g. quay.io/app-sre/osd-metrics-exporter:abcd123 11 | # -- exists in the remote repository. 12 | # If so, returns success. 13 | # If the image does not exist, but the query was otherwise successful, returns 14 | # failure. 
15 | # If the query fails for any reason, prints an error and *exits* nonzero. 16 | # 17 | # This function cribbed from: 18 | # https://github.com/openshift/boilerplate/blob/0ba6566d544d0df9993a92b2286c131eb61f3e88/boilerplate/_lib/common.sh#L77-L135 19 | image_exists_in_repo() { 20 | local image_uri=$1 21 | local output 22 | local rc 23 | 24 | local skopeo_stderr=$(mktemp) 25 | 26 | output=$(skopeo inspect docker://${image_uri} 2>$skopeo_stderr) 27 | rc=$? 28 | # So we can delete the temp file right away... 29 | stderr=$(cat $skopeo_stderr) 30 | rm -f $skopeo_stderr 31 | if [[ $rc -eq 0 ]]; then 32 | # The image exists. Sanity check the output. 33 | local digest=$(echo $output | jq -r .Digest) 34 | if [[ -z "$digest" ]]; then 35 | echo "Unexpected error: skopeo inspect succeeded, but output contained no .Digest" 36 | echo "Here's the output:" 37 | echo "$output" 38 | echo "...and stderr:" 39 | echo "$stderr" 40 | exit 1 41 | fi 42 | echo "Image ${image_uri} exists with digest $digest." 43 | return 0 44 | elif [[ "$stderr" == *"manifest unknown"* ]]; then 45 | # We were able to talk to the repository, but the tag doesn't exist. 46 | # This is the normal "green field" case. 47 | echo "Image ${image_uri} does not exist in the repository." 48 | return 1 49 | elif [[ "$stderr" == *"was deleted or has expired"* ]]; then 50 | # This should be rare, but accounts for cases where we had to 51 | # manually delete an image. 52 | echo "Image ${image_uri} was deleted from the repository." 53 | echo "Proceeding as if it never existed." 54 | return 1 55 | else 56 | # Any other error. For example: 57 | # - "unauthorized: access to the requested resource is not 58 | # authorized". This happens not just on auth errors, but if we 59 | # reference a repository that doesn't exist. 60 | # - "no such host". 61 | # - Network or other infrastructure failures. 
62 | # In all these cases, we want to bail, because we don't know whether 63 | # the image exists (and we'd likely fail to push it anyway). 64 | echo "Error querying the repository for ${image_uri}:" 65 | echo "stdout: $output" 66 | echo "stderr: $stderr" 67 | exit 1 68 | fi 69 | } 70 | 71 | set -exv 72 | 73 | IMAGE_URI=$1 74 | [[ -z "$IMAGE_URI" ]] && usage 75 | 76 | # NOTE(efried): Since we reference images by digest, rebuilding an image 77 | # with the same tag can be Bad. This is because the digest calculation 78 | # includes metadata such as date stamp, meaning that even though the 79 | # contents may be identical, the digest may change. In this situation, 80 | # the original digest URI no longer has any tags referring to it, so the 81 | # repository deletes it. This can break existing deployments referring 82 | # to the old digest. We could have solved this issue by generating a 83 | # permanent tag tied to each digest. We decided to do it this way 84 | # instead. 85 | # For testing purposes, if you need to force the build/push to rerun, 86 | # delete the image at $IMAGE_URI. 87 | if image_exists_in_repo "$IMAGE_URI"; then 88 | echo "Image ${IMAGE_URI} already exists. Nothing to do!" 89 | exit 0 90 | fi 91 | 92 | # build the image, the selectorsyncset, and push the image 93 | make -C $(dirname $0)/../ syncset build-base container-image-push 94 | -------------------------------------------------------------------------------- /build/build_push_package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | usage() { 4 | echo "Usage: $0 IMAGE_URI" >&2 5 | exit 1 6 | } 7 | 8 | ## image_exists_in_repo IMAGE_URI 9 | # 10 | # Checks whether IMAGE_URI -- e.g. quay.io/app-sre/osd-metrics-exporter:abcd123 11 | # -- exists in the remote repository. 12 | # If so, returns success. 13 | # If the image does not exist, but the query was otherwise successful, returns 14 | # failure. 
15 | # If the query fails for any reason, prints an error and *exits* nonzero. 16 | # 17 | # This function cribbed from: 18 | # https://github.com/openshift/boilerplate/blob/0ba6566d544d0df9993a92b2286c131eb61f3e88/boilerplate/_lib/common.sh#L77-L135 19 | image_exists_in_repo() { 20 | local image_uri=$1 21 | local output 22 | local rc 23 | 24 | local skopeo_stderr=$(mktemp) 25 | 26 | output=$(skopeo inspect docker://${image_uri} 2>$skopeo_stderr) 27 | rc=$? 28 | # So we can delete the temp file right away... 29 | stderr=$(cat $skopeo_stderr) 30 | rm -f $skopeo_stderr 31 | if [[ $rc -eq 0 ]]; then 32 | # The image exists. Sanity check the output. 33 | local digest=$(echo $output | jq -r .Digest) 34 | if [[ -z "$digest" ]]; then 35 | echo "Unexpected error: skopeo inspect succeeded, but output contained no .Digest" 36 | echo "Here's the output:" 37 | echo "$output" 38 | echo "...and stderr:" 39 | echo "$stderr" 40 | exit 1 41 | fi 42 | echo "Image ${image_uri} exists with digest $digest." 43 | return 0 44 | elif [[ "$stderr" == *"manifest unknown"* ]]; then 45 | # We were able to talk to the repository, but the tag doesn't exist. 46 | # This is the normal "green field" case. 47 | echo "Image ${image_uri} does not exist in the repository." 48 | return 1 49 | elif [[ "$stderr" == *"was deleted or has expired"* ]]; then 50 | # This should be rare, but accounts for cases where we had to 51 | # manually delete an image. 52 | echo "Image ${image_uri} was deleted from the repository." 53 | echo "Proceeding as if it never existed." 54 | return 1 55 | else 56 | # Any other error. For example: 57 | # - "unauthorized: access to the requested resource is not 58 | # authorized". This happens not just on auth errors, but if we 59 | # reference a repository that doesn't exist. 60 | # - "no such host". 61 | # - Network or other infrastructure failures. 
62 | # In all these cases, we want to bail, because we don't know whether 63 | # the image exists (and we'd likely fail to push it anyway). 64 | echo "Error querying the repository for ${image_uri}:" 65 | echo "stdout: $output" 66 | echo "stderr: $stderr" 67 | exit 1 68 | fi 69 | } 70 | 71 | set -exv 72 | 73 | IMAGE_URI=$1 74 | [[ -z "$IMAGE_URI" ]] && usage 75 | 76 | # NOTE(efried): Since we reference images by digest, rebuilding an image 77 | # with the same tag can be Bad. This is because the digest calculation 78 | # includes metadata such as date stamp, meaning that even though the 79 | # contents may be identical, the digest may change. In this situation, 80 | # the original digest URI no longer has any tags referring to it, so the 81 | # repository deletes it. This can break existing deployments referring 82 | # to the old digest. We could have solved this issue by generating a 83 | # permanent tag tied to each digest. We decided to do it this way 84 | # instead. 85 | # For testing purposes, if you need to force the build/push to rerun, 86 | # delete the image at $IMAGE_URI. 87 | if image_exists_in_repo "$IMAGE_URI"; then 88 | echo "Image ${IMAGE_URI} already exists. Nothing to do!" 89 | exit 0 90 | fi 91 | 92 | # build the image, the selectorsyncset, and push the image 93 | make -C $(dirname $0)/../ package build-base package-push 94 | -------------------------------------------------------------------------------- /build/pr_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "Using git version $(git version)" 6 | echo "Using go version $(go version)" 7 | 8 | CURRENT_DIR=$(dirname "$0") 9 | 10 | #BUILD_CMD="build-base" make lint test build-sss build-base 11 | make -C $(dirname $0)/../ container-test syncset package build-base 12 | 13 | # make sure nothing changed (i.e. SSS templates being invalid) 14 | git diff --exit-code 15 | MAKE_RC=$? 
16 | 17 | if [ "$MAKE_RC" != "0" ]; 18 | then 19 | echo "FAILURE: unexpected changes after building." 20 | exit $MAKE_RC 21 | fi 22 | -------------------------------------------------------------------------------- /cmd/fips.go: -------------------------------------------------------------------------------- 1 | //go:build fips_enabled 2 | // +build fips_enabled 3 | 4 | // BOILERPLATE GENERATED -- DO NOT EDIT 5 | // Run 'make ensure-fips' to regenerate 6 | 7 | package main 8 | 9 | import ( 10 | _ "crypto/tls/fipsonly" 11 | "fmt" 12 | ) 13 | 14 | func init() { 15 | fmt.Println("***** Starting with FIPS crypto enabled *****") 16 | } 17 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "errors" 8 | "flag" 9 | "fmt" 10 | "net" 11 | "net/http" 12 | "os" 13 | 14 | "github.com/openshift/operator-custom-metrics/pkg/metrics" 15 | klog "k8s.io/klog/v2" 16 | "k8s.io/klog/v2/klogr" 17 | logf "sigs.k8s.io/controller-runtime/pkg/log" 18 | 19 | "github.com/openshift/managed-cluster-validating-webhooks/config" 20 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/dispatcher" 21 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/k8sutil" 22 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/localmetrics" 23 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks" 24 | ) 25 | 26 | var log = logf.Log.WithName("handler") 27 | 28 | var ( 29 | listenAddress = flag.String("listen", "0.0.0.0", "listen address") 30 | listenPort = flag.String("port", "5000", "port to listen on") 31 | testHooks = flag.Bool("testhooks", false, "Test webhook URI uniqueness and quit?") 32 | 33 | useTLS = flag.Bool("tls", false, "Use TLS? 
Must specify -tlskey, -tlscert, -cacert") 34 | tlsKey = flag.String("tlskey", "", "TLS Key for TLS") 35 | tlsCert = flag.String("tlscert", "", "TLS Certificate") 36 | caCert = flag.String("cacert", "", "CA Cert file") 37 | 38 | metricsPath = "/metrics" 39 | metricsPort = "8080" 40 | ) 41 | 42 | func main() { 43 | var metricsAddr string 44 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":"+metricsPort, "The address the metric endpoint binds to.") 45 | flag.Parse() 46 | klog.SetOutput(os.Stdout) 47 | 48 | logf.SetLogger(klogr.New()) 49 | 50 | if !*testHooks { 51 | log.Info("HTTP server running at", "listen", net.JoinHostPort(*listenAddress, *listenPort)) 52 | } 53 | dispatcher := dispatcher.NewDispatcher(webhooks.Webhooks) 54 | seen := make(map[string]bool) 55 | for name, hook := range webhooks.Webhooks { 56 | realHook := hook() 57 | if seen[realHook.GetURI()] { 58 | panic(fmt.Errorf("Duplicate webhook trying to listen on %s", realHook.GetURI())) 59 | } 60 | seen[realHook.GetURI()] = true 61 | if !*testHooks { 62 | log.Info("Listening", "webhookName", name, "URI", realHook.GetURI()) 63 | } 64 | http.HandleFunc(realHook.GetURI(), dispatcher.HandleRequest) 65 | } 66 | if *testHooks { 67 | os.Exit(0) 68 | } 69 | 70 | // start metrics server 71 | metricsServer := metrics.NewBuilder(config.OperatorNamespace, fmt.Sprintf("%s-metrics", config.OperatorName)). 72 | WithPort(metricsPort). 73 | WithPath(metricsPath). 74 | WithServiceLabel(map[string]string{"app": "validation-webhook"}). 75 | WithCollectors(localmetrics.MetricsList).
76 | GetConfig() 77 | 78 | // get the namespace we're running in to confirm if running in a cluster 79 | if _, err := k8sutil.GetOperatorNamespace(); err != nil { 80 | if errors.Is(err, k8sutil.ErrRunLocal) { 81 | log.Info("Skipping metrics server creation; not running in a cluster.") 82 | } else { 83 | log.Error(err, "Failed to get operator namespace") 84 | } 85 | } else { 86 | if err := metrics.ConfigureMetrics(context.TODO(), *metricsServer); err != nil { 87 | log.Error(err, "Failed to configure metrics") 88 | } else { 89 | log.Info("Successfully configured metrics") 90 | } 91 | } 92 | 93 | server := &http.Server{ 94 | Addr: net.JoinHostPort(*listenAddress, *listenPort), 95 | } 96 | if *useTLS { 97 | cafile, err := os.ReadFile(*caCert) 98 | if err != nil { 99 | log.Error(err, "Couldn't read CA cert file") 100 | os.Exit(1) 101 | } 102 | certpool := x509.NewCertPool() 103 | certpool.AppendCertsFromPEM(cafile) 104 | 105 | server.TLSConfig = &tls.Config{ 106 | RootCAs: certpool, 107 | } 108 | log.Error(server.ListenAndServeTLS(*tlsCert, *tlsKey), "Error serving TLS") 109 | } else { 110 | log.Error(server.ListenAndServe(), "Error serving non-TLS connection") 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /config/config.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | const ( 4 | // I know this isn't the operator's name but so much stuff has been coded to use this... 
5 | OperatorName = "validation-webhook" 6 | OperatorNamespace = "openshift-validation-webhook" 7 | ) 8 | -------------------------------------------------------------------------------- /config/package/managed-cluster-validating-webhooks-package.Containerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | ADD config/package/*.yaml* /package/ 4 | -------------------------------------------------------------------------------- /config/package/manifest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: manifests.package-operator.run/v1alpha1 2 | kind: PackageManifest 3 | metadata: 4 | name: validation-webhook 5 | spec: 6 | scopes: 7 | - Namespaced 8 | phases: 9 | - name: config 10 | - name: rbac 11 | - name: deploy 12 | - name: webhooks 13 | class: hosted-cluster 14 | config: 15 | openAPIV3Schema: 16 | properties: 17 | serviceca: 18 | description: Service Certificate Authority used for webhook client authentication 19 | type: string 20 | required: 21 | - serviceca 22 | type: object 23 | availabilityProbes: 24 | - probes: 25 | - condition: 26 | type: Available 27 | status: "True" 28 | - fieldsEqual: 29 | fieldA: .status.updatedReplicas 30 | fieldB: .status.replicas 31 | selector: 32 | kind: 33 | group: apps 34 | kind: Deployment 35 | -------------------------------------------------------------------------------- /docs/hypershift.md: -------------------------------------------------------------------------------- 1 | # managed-cluster-validating-webhooks on Hypershift 2 | 3 | ## How it works 4 | 5 | Managed Cluster Validating Webhooks (MCVW) is deployed into Hypershift environments via several different components. 
6 | 7 | - The webhook admission service is deployed into each hosted control plane (HCP) namespace on Hypershift management clusters, via [package-operator](https://package-operator.run/) 8 | - The `ValidatingWebhookConfiguration` resources are deployed directly onto Hypershift hosted clusters. 9 | 10 | The above components are both installed via a [package operator](https://package-operator.run/) (PKO) package. The package is distributed to Hypershift Management Clusters via an Advanced Cluster Management policy. These resources will be discussed in the section below. 11 | 12 | ## Package Operator package 13 | 14 | The PKO package consists of: 15 | - [a manifest](../config/package/manifest.yaml) which lists the phases involved in the package installation, any availability and promotion tests. 16 | - [a resource bundle](../config/package/resources.yaml.gotmpl) which contains all the resources needed for MCVW to run in the HCP namespace, as well as the ValidatingWebhookConfigurations installed on the hosted cluster. This bundle is dynamically generated by [resources.go](../build/resources.go). Each resource is annotated with a phase so that PKO knows during which phase the resource should be installed. 17 | - [a Containerfile](../config/package/managed-cluster-validating-webhooks-package.Containerfile) which builds the PKO package image. 18 | 19 | ### Building a package 20 | 21 | You can manually rebuild or generate the resource bundle by running: 22 | 23 | ```bash 24 | make package 25 | ``` 26 | 27 | You can manually build the PKO package image by running: 28 | ```bash 29 | make IMG_ORG= build-package-image 30 | ``` 31 | 32 | Note that the resulting package image will follow the naming convention `quay.io/$USER/managed-cluster-validating-webhooks-hs-package` 33 | and can be pushed to Quay for testing if needed. 
34 | 35 | ### Testing a package 36 | 37 | Once a package has been built (and pushed to a public image repository) it can be manually installed on a PKO-running cluster by creating a simple `Package` spec: 38 | 39 | ```yaml 40 | apiVersion: package-operator.run/v1alpha1 41 | kind: Package 42 | metadata: 43 | name: validation-webhook 44 | namespace: validation-webhook 45 | spec: 46 | image: quay.io/$USER/managed-cluster-validating-webhooks-hs-package:$TAG 47 | ``` 48 | 49 | ## ACM Policy for Package distribution 50 | 51 | On Hypershift, the `Package` resource is distributed to all HCP Namespaces via a [SelectorSyncSet](../hack/templates/00-managed-cluster-validating-webhooks-hs.SelectorSyncSet.yaml.tmpl) containing ACM Policy. 52 | 53 | The application of the SelectorSyncSet to Hive clusters (in turn distributing it to the Hypershift service clusters) is performed by [app-interface](https://gitlab.cee.redhat.com/service/app-interface/-/blob/master/data/services/osd-operators/cicd/saas/saas-managed-cluster-validating-webhooks.yaml). 54 | 55 | ## How the CI/CD process works 56 | 57 | This section describes the main steps that enable a CI/CD flow for `managed-cluster-validating-webhooks`: 58 | 59 | - A new commit is merged to the MCVW repository. 60 | - This [triggers app-interface](https://gitlab.cee.redhat.com/service/app-interface/-/blob/master/data/services/osd-operators/cicd/ci-int/jobs-managed-cluster-validating-webhooks.yaml) to call the MCVW [build_deploy.sh](https://github.com/openshift/managed-cluster-validating-webhooks/blob/master/build/build_deploy.sh) script. 61 | - The `build_deploy.sh` script builds a new MCVW image and a new PKO package. Each are tagged with the same git short hash representing the commit that was just merged. 
62 | - The `managed-cluster-validating-webhooks-hypershift` SaaS [resource template in app-interface](https://gitlab.cee.redhat.com/service/app-interface/-/blob/master/data/services/osd-operators/cicd/saas/saas-managed-cluster-validating-webhooks.yaml) will roll out the latest templated [SelectorSyncSet](https://github.com/openshift/managed-cluster-validating-webhooks/blob/master/hack/templates/00-managed-cluster-validating-webhooks-hs.SelectorSyncSet.yaml.tmpl) to staging/integration Hive shards. The `IMAGE_DIGEST` value will be replaced by the git short hash of the latest commit; therefore, the PKO image referenced will be the one built by the earlier step. 63 | - Because the ACM Policy has changed, the Policy will be updated on all Hypershift Management Clusters. This will result in the `Package` resource updating in every HCP Namespace to reference the new PKO image. 64 | - PKO will download that PKO image and install or update the resources contained within. -------------------------------------------------------------------------------- /docs/webhooks-short.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "webhookName": "clusterlogging-validation", 4 | "documentString": "Managed OpenShift Customers may set log retention outside the allowed range of 0-7 days" 5 | }, 6 | { 7 | "webhookName": "clusterrolebindings-validation", 8 | "documentString": "Managed OpenShift Customers may not delete the cluster role bindings under the managed namespaces: (^openshift-.*|kube-system)" 9 | }, 10 | { 11 | "webhookName": "clusterroles-validation", 12 | "documentString": "Managed OpenShift Customers may not delete protected ClusterRoles including cluster-admin, view, edit, admin, specific system roles (system:admin, system:node, system:node-proxier, system:kube-scheduler, system:kube-controller-manager), and backplane-* roles" 13 | }, 14 | { 15 | "webhookName": "customresourcedefinitions-validation", 16 | "documentString": 
"Managed OpenShift Customers may not change CustomResourceDefinitions managed by Red Hat." 17 | }, 18 | { 19 | "webhookName": "hiveownership-validation", 20 | "documentString": "Managed OpenShift customers may not edit certain managed resources. A managed resource has a \"hive.openshift.io/managed\": \"true\" label." 21 | }, 22 | { 23 | "webhookName": "imagecontentpolicies-validation", 24 | "documentString": "Managed OpenShift customers may not create ImageContentSourcePolicy, ImageDigestMirrorSet, or ImageTagMirrorSet resources that configure mirrors that would conflict with system registries (e.g. quay.io, registry.redhat.io, registry.access.redhat.com, etc). For more details, see https://docs.openshift.com/" 25 | }, 26 | { 27 | "webhookName": "ingress-config-validation", 28 | "documentString": "Managed OpenShift customers may not modify ingress config resources because it can can degrade cluster operators and can interfere with OpenShift SRE monitoring." 29 | }, 30 | { 31 | "webhookName": "ingresscontroller-validation", 32 | "documentString": "Managed OpenShift Customer may create IngressControllers without necessary taints. This can cause those workloads to be provisioned on master nodes." 33 | }, 34 | { 35 | "webhookName": "namespace-validation", 36 | "documentString": "Managed OpenShift Customers may not modify namespaces specified in the [openshift-monitoring/managed-namespaces openshift-monitoring/ocp-namespaces] ConfigMaps because customer workloads should be placed in customer-created namespaces. Customers may not create namespaces identified by this regular expression (^com$|^io$|^in$) because it could interfere with critical DNS resolution. Additionally, customers may not set or change the values of these Namespace labels [managed.openshift.io/storage-pv-quota-exempt managed.openshift.io/service-lb-quota-exempt]." 
37 | }, 38 | { 39 | "webhookName": "networkpolicies-validation", 40 | "documentString": "Managed OpenShift Customers may not create NetworkPolicies in namespaces managed by Red Hat." 41 | }, 42 | { 43 | "webhookName": "node-validation-osd", 44 | "documentString": "Managed OpenShift customers may not alter Node objects." 45 | }, 46 | { 47 | "webhookName": "pod-validation", 48 | "documentString": "Managed OpenShift Customers may use tolerations on Pods that could cause those Pods to be scheduled on infra or master nodes." 49 | }, 50 | { 51 | "webhookName": "podimagespec-mutation", 52 | "documentString": "OpenShift debugging tools on Managed OpenShift clusters must be available even if internal image registry is removed." 53 | }, 54 | { 55 | "webhookName": "prometheusrule-validation", 56 | "documentString": "Managed OpenShift Customers may not create PrometheusRule in namespaces managed by Red Hat." 57 | }, 58 | { 59 | "webhookName": "regular-user-validation", 60 | "documentString": "Managed OpenShift customers may not manage any objects in the following APIGroups [upgrade.managed.openshift.io config.openshift.io operator.openshift.io network.openshift.io admissionregistration.k8s.io addons.managed.openshift.io cloudingress.managed.openshift.io managed.openshift.io splunkforwarder.managed.openshift.io autoscaling.openshift.io machineconfiguration.openshift.io cloudcredential.openshift.io machine.openshift.io ocmagent.managed.openshift.io], nor may Managed OpenShift customers alter the APIServer, KubeAPIServer, OpenShiftAPIServer, ClusterVersion, Proxy or SubjectPermission objects." 
61 | }, 62 | { 63 | "webhookName": "scc-validation", 64 | "documentString": "Managed OpenShift Customers may not modify the following default SCCs: [anyuid hostaccess hostmount-anyuid hostnetwork hostnetwork-v2 node-exporter nonroot nonroot-v2 privileged restricted restricted-v2]" 65 | }, 66 | { 67 | "webhookName": "sdn-migration-validation", 68 | "documentString": "Managed OpenShift customers may not modify the network config type because it can degrade cluster operators and can interfere with OpenShift SRE monitoring." 69 | }, 70 | { 71 | "webhookName": "service-mutation", 72 | "documentString": "LoadBalancer-type services on Managed OpenShift clusters must contain an additional annotation for managed policy compliance." 73 | }, 74 | { 75 | "webhookName": "serviceaccount-validation", 76 | "documentString": "Managed OpenShift Customers may not delete the service accounts under the managed namespaces." 77 | }, 78 | { 79 | "webhookName": "techpreviewnoupgrade-validation", 80 | "documentString": "Managed OpenShift Customers may not use TechPreviewNoUpgrade FeatureGate that could prevent any future ability to do a y-stream upgrade to their clusters."
81 | } 82 | ] 83 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/openshift/managed-cluster-validating-webhooks 2 | 3 | go 1.24.6 4 | 5 | require ( 6 | github.com/evanphx/json-patch v4.12.0+incompatible 7 | github.com/ghodss/yaml v1.0.1-0.20220118164431-d8423dcdf344 8 | github.com/go-logr/logr v1.4.2 9 | github.com/onsi/ginkgo/v2 v2.23.4 10 | github.com/onsi/gomega v1.37.0 11 | github.com/openshift/api v0.0.0-20250916150132-83b017b06367 12 | github.com/openshift/cluster-logging-operator v0.0.0-20230328172346-05f4f8be54d5 13 | github.com/openshift/hive/apis v0.0.0-20230327212335-7fd70848a6d5 14 | github.com/openshift/operator-custom-metrics v0.5.1 15 | github.com/openshift/osde2e-common v0.0.0-20231010150014-8a4449a371e6 16 | github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.55.1 17 | github.com/prometheus/client_golang v1.22.0 18 | gomodules.xyz/jsonpatch/v2 v2.4.0 19 | k8s.io/api v0.34.0 20 | k8s.io/apiextensions-apiserver v0.34.0 21 | k8s.io/apimachinery v0.34.0 22 | k8s.io/client-go v0.34.0 23 | k8s.io/klog/v2 v2.130.1 24 | k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 25 | sigs.k8s.io/controller-runtime v0.22.0 26 | sigs.k8s.io/e2e-framework v0.3.0 27 | ) 28 | 29 | require ( 30 | github.com/beorn7/perks v1.0.1 // indirect 31 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 32 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 33 | github.com/emicklei/go-restful/v3 v3.12.2 // indirect 34 | github.com/evanphx/json-patch/v5 v5.9.11 // indirect 35 | github.com/fxamacker/cbor/v2 v2.9.0 // indirect 36 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 37 | github.com/go-openapi/jsonreference v0.21.0 // indirect 38 | github.com/go-openapi/swag v0.23.0 // indirect 39 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 40 | github.com/gogo/protobuf v1.3.2 // indirect 
41 | github.com/google/gnostic-models v0.7.0 // indirect 42 | github.com/google/go-cmp v0.7.0 // indirect 43 | github.com/google/pprof v0.0.0-20250630185457-6e76a2b096b5 // indirect 44 | github.com/google/uuid v1.6.0 // indirect 45 | github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect 46 | github.com/josharian/intern v1.0.0 // indirect 47 | github.com/json-iterator/go v1.1.12 // indirect 48 | github.com/mailru/easyjson v0.7.7 // indirect 49 | github.com/moby/spdystream v0.5.0 // indirect 50 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 51 | github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect 52 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 53 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect 54 | github.com/openshift/custom-resource-status v1.1.3-0.20220503160415-f2fdb4999d87 // indirect 55 | github.com/openshift/elasticsearch-operator v0.0.0-20241202183904-81cd6e70c15e // indirect 56 | github.com/pkg/errors v0.9.1 // indirect 57 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 58 | github.com/prometheus/client_model v0.6.1 // indirect 59 | github.com/prometheus/common v0.62.0 // indirect 60 | github.com/prometheus/procfs v0.15.1 // indirect 61 | github.com/spf13/pflag v1.0.6 // indirect 62 | github.com/x448/float16 v0.8.4 // indirect 63 | go.uber.org/automaxprocs v1.6.0 // indirect 64 | go.yaml.in/yaml/v2 v2.4.2 // indirect 65 | go.yaml.in/yaml/v3 v3.0.4 // indirect 66 | golang.org/x/net v0.38.0 // indirect 67 | golang.org/x/oauth2 v0.27.0 // indirect 68 | golang.org/x/sys v0.32.0 // indirect 69 | golang.org/x/term v0.30.0 // indirect 70 | golang.org/x/text v0.23.0 // indirect 71 | golang.org/x/time v0.11.0 // indirect 72 | golang.org/x/tools v0.31.0 // indirect 73 | google.golang.org/protobuf v1.36.5 // indirect 74 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 75 | gopkg.in/inf.v0 
v0.9.1 // indirect 76 | gopkg.in/yaml.v2 v2.4.0 // indirect 77 | gopkg.in/yaml.v3 v3.0.1 // indirect 78 | k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect 79 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect 80 | sigs.k8s.io/randfill v1.0.0 // indirect 81 | sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect 82 | sigs.k8s.io/yaml v1.6.0 // indirect 83 | ) 84 | -------------------------------------------------------------------------------- /hack/documentation/document.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Offer a way to auto-generate documentation 4 | 5 | import ( 6 | "encoding/json" 7 | "flag" 8 | "fmt" 9 | "os" 10 | "sort" 11 | 12 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks" 13 | admissionregv1 "k8s.io/api/admissionregistration/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | ) 16 | 17 | var ( 18 | hideRules = flag.Bool("hideRules", false, "Hide the Admission Rules?") 19 | ) 20 | 21 | type docuhook struct { 22 | Name string `json:"webhookName"` 23 | Rules []admissionregv1.RuleWithOperations `json:"rules,omitempty"` 24 | ObjectSelector *metav1.LabelSelector `json:"webhookObjectSelector,omitempty"` 25 | DocumentationString string `json:"documentString"` 26 | } 27 | 28 | // WriteDocs will write out all the docs. 
29 | func WriteDocs() { 30 | hookNames := make([]string, 0) 31 | for name := range webhooks.Webhooks { 32 | hookNames = append(hookNames, name) 33 | } 34 | sort.Strings(hookNames) 35 | dochooks := make([]docuhook, len(hookNames)) 36 | 37 | for i, hookName := range hookNames { 38 | hook := webhooks.Webhooks[hookName] 39 | realHook := hook() 40 | dochooks[i].Name = realHook.Name() 41 | dochooks[i].DocumentationString = realHook.Doc() 42 | if !*hideRules { 43 | dochooks[i].Rules = realHook.Rules() 44 | dochooks[i].ObjectSelector = realHook.ObjectSelector() 45 | } 46 | } 47 | 48 | b, err := json.MarshalIndent(&dochooks, "", " ") 49 | if err != nil { 50 | fmt.Printf("Error encoding: %s\n", err.Error()) 51 | os.Exit(1) 52 | } 53 | _, err = os.Stdout.Write(b) 54 | if err != nil { 55 | fmt.Printf("Error Writing: %s\n", err.Error()) 56 | os.Exit(1) 57 | } 58 | 59 | fmt.Println() 60 | 61 | } 62 | 63 | func main() { 64 | flag.Parse() 65 | WriteDocs() 66 | } 67 | -------------------------------------------------------------------------------- /hack/templates/00-managed-cluster-validating-webhooks-hs.SelectorSyncSet.yaml.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Template 3 | metadata: 4 | name: hs-managed-cluster-validating-webhooks-template 5 | parameters: 6 | - name: REGISTRY_IMG 7 | required: true 8 | - name: IMAGE_DIGEST 9 | required: true 10 | objects: 11 | - apiVersion: hive.openshift.io/v1 12 | kind: SelectorSyncSet 13 | metadata: 14 | name: managed-cluster-validating-webhooks-hs-policy 15 | spec: 16 | clusterDeploymentSelector: 17 | matchLabels: 18 | ext-hypershift.openshift.io/cluster-type: service-cluster 19 | resourceApplyMode: Sync 20 | resources: 21 | - apiVersion: apps.open-cluster-management.io/v1 22 | kind: PlacementRule 23 | metadata: 24 | name: managed-cluster-validating-webhooks 25 | namespace: openshift-acm-policies 26 | spec: 27 | clusterSelector: 28 | matchExpressions: 29 | - key: 
hypershift.open-cluster-management.io/management-cluster 30 | operator: In 31 | values: 32 | - "true" 33 | - apiVersion: policy.open-cluster-management.io/v1 34 | kind: PlacementBinding 35 | metadata: 36 | name: managed-cluster-validating-webhooks 37 | namespace: openshift-acm-policies 38 | placementRef: 39 | name: managed-cluster-validating-webhooks 40 | kind: PlacementRule 41 | apiGroup: apps.open-cluster-management.io 42 | subjects: 43 | - name: managed-cluster-validating-webhooks 44 | kind: Policy 45 | apiGroup: policy.open-cluster-management.io 46 | - apiVersion: policy.open-cluster-management.io/v1 47 | kind: Policy 48 | metadata: 49 | name: managed-cluster-validating-webhooks 50 | namespace: openshift-acm-policies 51 | spec: 52 | remediationAction: enforce 53 | disabled: false 54 | policy-templates: 55 | - objectDefinition: 56 | apiVersion: policy.open-cluster-management.io/v1 57 | kind: ConfigurationPolicy 58 | metadata: 59 | name: managed-cluster-validating-webhooks 60 | annotations: 61 | policy.open-cluster-management.io/disable-templates: "true" 62 | spec: 63 | namespaceSelector: 64 | matchLabels: 65 | hypershift.openshift.io/hosted-control-plane: "true" 66 | pruneObjectBehavior: DeleteIfCreated 67 | object-templates: 68 | - complianceType: MustHave 69 | objectDefinition: 70 | apiVersion: package-operator.run/v1alpha1 71 | kind: ObjectTemplate 72 | metadata: 73 | name: validation-webhooks 74 | spec: 75 | template: | 76 | apiVersion: package-operator.run/v1alpha1 77 | kind: Package 78 | metadata: 79 | name: validation-webhooks 80 | spec: 81 | image: ${REGISTRY_IMG}@${IMAGE_DIGEST} 82 | config: {{toJson .config}} 83 | sources: 84 | - apiVersion: v1 85 | kind: ConfigMap 86 | name: openshift-service-ca.crt 87 | items: 88 | - key: .data['service-ca\.crt'] 89 | destination: .serviceca 90 | -------------------------------------------------------------------------------- /hack/test.sh: 
// IsPrivilegedNamespace reports whether ns matches any of the privileged
// namespace regular expressions in PrivilegedNamespaces (a generated list;
// see generate/namespaces.go and the go:generate directive above).
func IsPrivilegedNamespace(ns string) bool {
	return utils.RegexSliceContains(ns, PrivilegedNamespaces)
}
"https://raw.githubusercontent.com/openshift/managed-cluster-config/master/deploy/osd-managed-resources" 35 | serviceAccountHeader = `^system:serviceaccounts:` 36 | namespacesKey = "managed_namespaces.yaml" 37 | ) 38 | 39 | const templateText = `// Code generated by pkg/config/generate/namespaces.go; DO NOT EDIT. 40 | // Generated at {{ .Timestamp }} 41 | package config 42 | 43 | var ConfigMapSources = []string{ 44 | {{- range .ConfigMaps }} 45 | "{{ printf "%s" . }}", 46 | {{- end }} 47 | } 48 | 49 | var PrivilegedNamespaces = []string{ 50 | {{- range .Namespaces }} 51 | "{{ printf "%s" . }}", 52 | {{- end }} 53 | } 54 | ` 55 | 56 | type templateArgs struct { 57 | Timestamp time.Time 58 | ConfigMaps []string 59 | Namespaces []string 60 | ServiceAccounts []string 61 | } 62 | 63 | // ManagedNamespacesConfig defines the structure of the managed_namespaces.yaml file from the managed-namespaces ConfigMap 64 | type NamespacesConfig struct { 65 | Resources NamespaceList `yaml:"Resources,omitempty" json:"Resources,omitempty"` 66 | } 67 | 68 | type NamespaceList struct { 69 | Namespace []Namespace `yaml:"Namespace,omitempty" json:"Namespace,omitempty"` 70 | } 71 | 72 | type Namespace struct { 73 | Name string `yaml:"name,omitempty" json:"name,omitempty"` 74 | } 75 | 76 | func main() { 77 | // Retrieve current configuration from managed-cluster-config 78 | for _, fileName := range namespaceFiles { 79 | // GET files and read contents 80 | fileUrl := fmt.Sprintf("%s/%s", mccBaseUrl, fileName) 81 | response, err := http.Get(fileUrl) 82 | if err != nil { 83 | log.Fatalf("Error retrieving file from managed-cluster-config: %v", err) 84 | return 85 | } 86 | defer func() { 87 | err := response.Body.Close() 88 | if err != nil { 89 | log.Fatalf("Error closing response body: %v", err) 90 | } 91 | }() 92 | 93 | scanner := bufio.NewScanner(response.Body) 94 | rawFile := []byte{} 95 | for scanner.Scan() { 96 | rawFile = append(rawFile, scanner.Bytes()...) 
97 | // Newlines must be manually appended, Scan() only reads in the line contents 98 | rawFile = append(rawFile, []byte("\n")...) 99 | } 100 | if scanner.Err() != nil { 101 | log.Fatalf("Error reading response body: %v", scanner.Err()) 102 | } 103 | 104 | // Convert file contents to ConfigMap; convert ConfigMap data to NamespaceConfig format 105 | nsConfigMap := corev1.ConfigMap{} 106 | err = yaml.Unmarshal(rawFile, &nsConfigMap) 107 | if err != nil { 108 | log.Fatalf("Error decoding response: %v", err) 109 | } 110 | 111 | rawConfig := []byte(nsConfigMap.Data[namespacesKey]) 112 | nsConfig := NamespacesConfig{} 113 | err = yaml.Unmarshal(rawConfig, &nsConfig) 114 | if err != nil { 115 | log.Fatalf("Error decoding configMap: %v", err) 116 | } else if len(nsConfig.Resources.Namespace) == 0 { 117 | log.Fatalf("No namespaces retrieved from %s", fileName) 118 | } 119 | 120 | // Save retrieved namespaces, serviceaccounts, and configmap info 121 | configmaps = append(configmaps, fmt.Sprintf("%s/%s", nsConfigMap.Namespace, nsConfigMap.Name)) 122 | for _, ns := range nsConfig.Resources.Namespace { 123 | namespaces = append(namespaces, "^"+ns.Name+"$") 124 | } 125 | } 126 | 127 | // Write data to file 128 | genFile, err := os.Create(generatedFileName) 129 | if err != nil { 130 | log.Fatalf("Error creating file %s: %v", generatedFileName, err) 131 | } 132 | defer func() { 133 | err = genFile.Close() 134 | if err != nil { 135 | log.Fatalf("Error closing file %s: %v", genFile.Name(), err) 136 | } 137 | }() 138 | 139 | namespaceTemplateArgs := templateArgs{ 140 | Timestamp: time.Now().UTC(), 141 | ConfigMaps: configmaps, 142 | Namespaces: namespaces, 143 | } 144 | namespaceTemplate, err := template.New(generatedFileName).Parse(templateText) 145 | if err != nil { 146 | log.Fatalf("Error initializing template: %v", err) 147 | } 148 | 149 | err = namespaceTemplate.Execute(genFile, namespaceTemplateArgs) 150 | if err != nil { 151 | log.Fatalf("Error generating file from template: 
%v", err) 152 | } 153 | } 154 | -------------------------------------------------------------------------------- /pkg/config/namespaces.go: -------------------------------------------------------------------------------- 1 | // Code generated by pkg/config/generate/namespaces.go; DO NOT EDIT. 2 | // Generated at 2025-09-01 07:11:23.45239 +0000 UTC 3 | package config 4 | 5 | var ConfigMapSources = []string{ 6 | "openshift-monitoring/managed-namespaces", 7 | "openshift-monitoring/ocp-namespaces", 8 | } 9 | 10 | var PrivilegedNamespaces = []string{ 11 | "^default$", 12 | "^openshift$", 13 | "^kube-.*", 14 | "^redhat-.*", 15 | "^dedicated-admin$", 16 | "^openshift-addon-operator$", 17 | "^openshift-aqua$", 18 | "^openshift-aws-vpce-operator$", 19 | "^openshift-backplane$", 20 | "^openshift-backplane-cee$", 21 | "^openshift-backplane-csa$", 22 | "^openshift-backplane-cse$", 23 | "^openshift-backplane-csm$", 24 | "^openshift-backplane-managed-scripts$", 25 | "^openshift-backplane-mobb$", 26 | "^openshift-backplane-srep$", 27 | "^openshift-backplane-tam$", 28 | "^openshift-cloud-ingress-operator$", 29 | "^openshift-codeready-workspaces$", 30 | "^openshift-compliance$", 31 | "^openshift-compliance-monkey$", 32 | "^openshift-container-security$", 33 | "^openshift-custom-domains-operator$", 34 | "^openshift-customer-monitoring$", 35 | "^openshift-deployment-validation-operator$", 36 | "^openshift-managed-node-metadata-operator$", 37 | "^openshift-file-integrity$", 38 | "^openshift-logging$", 39 | "^openshift-managed-upgrade-operator$", 40 | "^openshift-must-gather-operator$", 41 | "^openshift-observability-operator$", 42 | "^openshift-ocm-agent-operator$", 43 | "^openshift-operators-redhat$", 44 | "^openshift-osd-metrics$", 45 | "^openshift-rbac-permissions$", 46 | "^openshift-route-monitor-operator$", 47 | "^openshift-scanning$", 48 | "^openshift-security$", 49 | "^openshift-splunk-forwarder-operator$", 50 | "^openshift-sre-pruning$", 51 | "^openshift-suricata$", 52 | 
"^openshift-validation-webhook$", 53 | "^openshift-velero$", 54 | "^openshift-monitoring$", 55 | "^openshift$", 56 | "^openshift-cluster-version$", 57 | "^goalert$", 58 | "^keycloak$", 59 | "^configure-goalert-operator$", 60 | "^kube-system$", 61 | "^openshift-apiserver$", 62 | "^openshift-apiserver-operator$", 63 | "^openshift-authentication$", 64 | "^openshift-authentication-operator$", 65 | "^openshift-cloud-controller-manager$", 66 | "^openshift-cloud-controller-manager-operator$", 67 | "^openshift-cloud-credential-operator$", 68 | "^openshift-cloud-network-config-controller$", 69 | "^openshift-cluster-api$", 70 | "^openshift-cluster-csi-drivers$", 71 | "^openshift-cluster-machine-approver$", 72 | "^openshift-cluster-node-tuning-operator$", 73 | "^openshift-cluster-samples-operator$", 74 | "^openshift-cluster-storage-operator$", 75 | "^openshift-config$", 76 | "^openshift-config-managed$", 77 | "^openshift-config-operator$", 78 | "^openshift-console$", 79 | "^openshift-console-operator$", 80 | "^openshift-console-user-settings$", 81 | "^openshift-controller-manager$", 82 | "^openshift-controller-manager-operator$", 83 | "^openshift-dns$", 84 | "^openshift-dns-operator$", 85 | "^openshift-etcd$", 86 | "^openshift-etcd-operator$", 87 | "^openshift-host-network$", 88 | "^openshift-image-registry$", 89 | "^openshift-ingress$", 90 | "^openshift-ingress-canary$", 91 | "^openshift-ingress-operator$", 92 | "^openshift-insights$", 93 | "^openshift-kni-infra$", 94 | "^openshift-kube-apiserver$", 95 | "^openshift-kube-apiserver-operator$", 96 | "^openshift-kube-controller-manager$", 97 | "^openshift-kube-controller-manager-operator$", 98 | "^openshift-kube-scheduler$", 99 | "^openshift-kube-scheduler-operator$", 100 | "^openshift-kube-storage-version-migrator$", 101 | "^openshift-kube-storage-version-migrator-operator$", 102 | "^openshift-machine-api$", 103 | "^openshift-machine-config-operator$", 104 | "^openshift-marketplace$", 105 | "^openshift-monitoring$", 106 | 
"^openshift-multus$", 107 | "^openshift-network-diagnostics$", 108 | "^openshift-network-operator$", 109 | "^openshift-nutanix-infra$", 110 | "^openshift-oauth-apiserver$", 111 | "^openshift-openstack-infra$", 112 | "^openshift-operator-lifecycle-manager$", 113 | "^openshift-operators$", 114 | "^openshift-ovirt-infra$", 115 | "^openshift-sdn$", 116 | "^openshift-ovn-kubernetes$", 117 | "^openshift-platform-operators$", 118 | "^openshift-route-controller-manager$", 119 | "^openshift-service-ca$", 120 | "^openshift-service-ca-operator$", 121 | "^openshift-user-workload-monitoring$", 122 | "^openshift-vsphere-infra$", 123 | } 124 | -------------------------------------------------------------------------------- /pkg/dispatcher/dispatcher.go: -------------------------------------------------------------------------------- 1 | package dispatcher 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "net/url" 7 | "sync" 8 | 9 | logf "sigs.k8s.io/controller-runtime/pkg/log" 10 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 11 | 12 | responsehelper "github.com/openshift/managed-cluster-validating-webhooks/pkg/helpers" 13 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks" 14 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 15 | ) 16 | 17 | var log = logf.Log.WithName("dispatcher") 18 | 19 | // Dispatcher struct 20 | type Dispatcher struct { 21 | hooks *map[string]webhooks.WebhookFactory // uri -> hookfactory 22 | mu sync.Mutex 23 | } 24 | 25 | // NewDispatcher new dispatcher 26 | func NewDispatcher(hooks webhooks.RegisteredWebhooks) *Dispatcher { 27 | hookMap := make(map[string]webhooks.WebhookFactory) 28 | for _, hook := range hooks { 29 | hookMap[hook().GetURI()] = hook 30 | } 31 | return &Dispatcher{ 32 | hooks: &hookMap, 33 | } 34 | } 35 | 36 | // HandleRequest http request 37 | // HTTP status code usage: When the request body is correctly parsed into a 38 | // request (utils.ParseHTTPRequest) then we 
should always send 200 OK and use 39 | // the response body (response.status.code) to indicate a problem. When instead 40 | // there's a problem with the HTTP request itself (404, an inability to parse a 41 | // request, or some internal problem) it is appropriate to use the HTTP status 42 | // code to communicate. 43 | func (d *Dispatcher) HandleRequest(w http.ResponseWriter, r *http.Request) { 44 | d.mu.Lock() 45 | defer d.mu.Unlock() 46 | log.Info("Handling request", "request", r.RequestURI) 47 | url, err := url.Parse(r.RequestURI) 48 | if err != nil { 49 | w.WriteHeader(http.StatusBadRequest) 50 | log.Error(err, "Couldn't parse request %s", r.RequestURI) 51 | responsehelper.SendResponse(w, admissionctl.Errored(http.StatusBadRequest, err)) 52 | return 53 | } 54 | 55 | // is it one of ours? 56 | if hook, ok := (*d.hooks)[url.Path]; ok { 57 | // it's one of ours, so let's attempt to parse the request 58 | request, _, err := utils.ParseHTTPRequest(r) 59 | // Problem even parsing an AdmissionReview, so use HTTP status code 60 | if err != nil { 61 | w.WriteHeader(http.StatusBadRequest) 62 | log.Error(err, "Error parsing HTTP Request Body") 63 | responsehelper.SendResponse(w, admissionctl.Errored(http.StatusBadRequest, err)) 64 | return 65 | } 66 | // Valid AdmissionReview, but we can't do anything with it because we do not 67 | // think the request inside is valid. 
68 | if !hook().Validate(request) { 69 | err = fmt.Errorf("not a valid webhook request") 70 | log.Error(err, "Error validaing HTTP Request Body") 71 | responsehelper.SendResponse(w, 72 | admissionctl.Errored(http.StatusBadRequest, err)) 73 | return 74 | } 75 | 76 | // Dispatch 77 | responsehelper.SendResponse(w, hook().Authorized(request)) 78 | return 79 | } 80 | log.Info("Request is not for a registered webhook.", "known_hooks", *d.hooks, "parsed_url", url, "lookup", (*d.hooks)[url.Path]) 81 | // Not a registered hook 82 | // Note: This segment is not likely to be reached because there will not be 83 | // any URI registered (handler set up) for an URI that would trigger this. 84 | w.WriteHeader(404) 85 | responsehelper.SendResponse(w, 86 | admissionctl.Errored(http.StatusBadRequest, 87 | fmt.Errorf("request is not for a registered webhook"))) 88 | } 89 | -------------------------------------------------------------------------------- /pkg/helpers/response.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import ( 4 | "encoding/json" 5 | "io" 6 | "net/http" 7 | 8 | admissionapi "k8s.io/api/admission/v1" 9 | logf "sigs.k8s.io/controller-runtime/pkg/log" 10 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 11 | ) 12 | 13 | var log = logf.Log.WithName("response_helper") 14 | 15 | // SendResponse Send the AdmissionReview. 16 | func SendResponse(w io.Writer, resp admissionctl.Response) { 17 | 18 | // Apply ownership annotation to allow for granular alerts for 19 | // manipulation of SREP owned webhooks. 
20 | resp.AuditAnnotations = map[string]string{ 21 | "owner": "srep-managed-webhook", 22 | } 23 | 24 | encoder := json.NewEncoder(w) 25 | responseAdmissionReview := admissionapi.AdmissionReview{ 26 | Response: &resp.AdmissionResponse, 27 | } 28 | responseAdmissionReview.APIVersion = admissionapi.SchemeGroupVersion.String() 29 | responseAdmissionReview.Kind = "AdmissionReview" 30 | err := encoder.Encode(responseAdmissionReview) 31 | // TODO (lisa): handle this in a non-recursive way (why would the second one succeed)? 32 | if err != nil { 33 | log.Error(err, "Failed to encode Response", "response", resp) 34 | SendResponse(w, admissionctl.Errored(http.StatusInternalServerError, err)) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pkg/helpers/response_test.go: -------------------------------------------------------------------------------- 1 | package helpers 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "net/http" 8 | "testing" 9 | 10 | admissionapi "k8s.io/api/admission/v1" 11 | "k8s.io/apimachinery/pkg/types" 12 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 13 | ) 14 | 15 | func makeBuffer() *bytes.Buffer { 16 | return new(bytes.Buffer) 17 | } 18 | 19 | func formatOutput(s string) string { 20 | return fmt.Sprintf("%s\n", s) 21 | } 22 | 23 | func makeResponseObj(uid string, allowed bool, e error) *admissionctl.Response { 24 | if e == nil { 25 | return &admissionctl.Response{ 26 | AdmissionResponse: admissionapi.AdmissionResponse{ 27 | UID: types.UID(uid), 28 | Allowed: allowed, 29 | }, 30 | } 31 | } else { 32 | n := admissionctl.Errored(http.StatusBadRequest, e) 33 | return &n 34 | } 35 | } 36 | 37 | func TestBadResponse(t *testing.T) { 38 | t.Skip("Not quite sure how to test json encoding error") 39 | } 40 | 41 | func TestResponse(t *testing.T) { 42 | tests := []struct { 43 | allowed bool 44 | uid string 45 | e error 46 | status int32 47 | expectedResult string 48 | }{ 49 
| { 50 | allowed: true, 51 | uid: "test-uid", 52 | e: nil, 53 | status: http.StatusOK, 54 | // the writer sends a newline 55 | expectedResult: formatOutput(`{"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1","response":{"uid":"test-uid","allowed":true,"auditAnnotations":{"owner":"srep-managed-webhook"}}}`), 56 | }, 57 | { 58 | allowed: false, 59 | uid: "test-fail-with-error", 60 | e: fmt.Errorf("request body is empty"), 61 | status: http.StatusBadRequest, 62 | expectedResult: formatOutput(`{"kind":"AdmissionReview","apiVersion":"admission.k8s.io/v1","response":{"uid":"","allowed":false,"status":{"metadata":{},"message":"request body is empty","code":400},"auditAnnotations":{"owner":"srep-managed-webhook"}}}`), 63 | }, 64 | } 65 | for _, test := range tests { 66 | buf := makeBuffer() 67 | respObj := makeResponseObj(test.uid, test.allowed, test.e) 68 | SendResponse(buf, *respObj) 69 | if buf.String() != test.expectedResult { 70 | t.Fatalf("Expected to have `%s` but got `%s`", test.expectedResult, buf.String()) 71 | } 72 | decodedResult := &admissionapi.AdmissionReview{} 73 | err := json.Unmarshal([]byte(buf.String()), decodedResult) 74 | if err != nil { 75 | t.Errorf("Couldn't unmarshal the JSON blob: %s", err.Error()) 76 | } 77 | t.Logf("Response body = %s", buf.String()) 78 | 79 | if test.e != nil { 80 | if test.status == http.StatusOK { 81 | t.Errorf("It is weird to have an error result and a 200 OK. 
Check test's status field.") 82 | } 83 | // check for the Response.Result 84 | if decodedResult.Response.Result == nil { 85 | t.Fatalf("Error responses need a Response.Result, and this one didn't have one") 86 | } else { 87 | if decodedResult.Response.Result.Code != test.status { 88 | t.Fatalf("Expected HTTP status code of the Result to be %d, but got %d instead", test.status, decodedResult.Response.Result.Code) 89 | } 90 | } 91 | } 92 | 93 | } 94 | 95 | } 96 | -------------------------------------------------------------------------------- /pkg/k8sutil/k8sutil.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "strings" 7 | 8 | "k8s.io/apimachinery/pkg/runtime" 9 | "k8s.io/client-go/rest" 10 | "k8s.io/client-go/tools/clientcmd" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | logf "sigs.k8s.io/controller-runtime/pkg/log" 13 | ) 14 | 15 | type RunModeType string 16 | 17 | const ( 18 | LocalRunMode RunModeType = "local" 19 | ClusterRunMode RunModeType = "cluster" 20 | 21 | OperatorNameEnvVar = "OPERATOR_NAME" 22 | ) 23 | 24 | var ( 25 | log = logf.Log.WithName("k8sutil") 26 | 27 | ForceRunModeEnv = "OSDK_FORCE_RUN_MODE" 28 | ErrNoNamespace = fmt.Errorf("namespace not found for current environment") 29 | ErrRunLocal = fmt.Errorf("operator run mode forced to local") 30 | ) 31 | 32 | func buildConfig(kubeconfig string) (*rest.Config, error) { 33 | // Try loading KUBECONFIG env var. 
If not set fallback on InClusterConfig 34 | 35 | if kubeconfig != "" { 36 | cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 37 | if err != nil { 38 | return nil, err 39 | } 40 | return cfg, nil 41 | } 42 | 43 | cfg, err := rest.InClusterConfig() 44 | if err != nil { 45 | return nil, err 46 | } 47 | return cfg, nil 48 | } 49 | 50 | // KubeClient creates a new kubeclient that interacts with the Kube api with the service account secrets 51 | func KubeClient(s *runtime.Scheme) (client.Client, error) { 52 | // Try loading KUBECONFIG env var. Else falls back on in-cluster config 53 | config, err := buildConfig(os.Getenv("KUBECONFIG")) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | c, err := client.New(config, client.Options{ 59 | Scheme: s, 60 | }) 61 | if err != nil { 62 | return nil, err 63 | } 64 | return c, nil 65 | } 66 | 67 | func isRunModeLocal() bool { 68 | return os.Getenv(ForceRunModeEnv) == string(LocalRunMode) 69 | } 70 | 71 | // GetOperatorNamespace returns the namespace the operator should be running in. 
72 | func GetOperatorNamespace() (string, error) { 73 | if isRunModeLocal() { 74 | return "", ErrRunLocal 75 | } 76 | nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") 77 | if err != nil { 78 | if os.IsNotExist(err) { 79 | return "", ErrNoNamespace 80 | } 81 | return "", err 82 | } 83 | ns := strings.TrimSpace(string(nsBytes)) 84 | log.V(1).Info("Found namespace", "Namespace", ns) 85 | return ns, nil 86 | } 87 | 88 | // GetOperatorName return the operator name 89 | func GetOperatorName() (string, error) { 90 | operatorName, found := os.LookupEnv(OperatorNameEnvVar) 91 | if !found { 92 | return "", fmt.Errorf("%s must be set", OperatorNameEnvVar) 93 | } 94 | if len(operatorName) == 0 { 95 | return "", fmt.Errorf("%s must not be empty", OperatorNameEnvVar) 96 | } 97 | return operatorName, nil 98 | } 99 | -------------------------------------------------------------------------------- /pkg/localmetrics/localmetrics.go: -------------------------------------------------------------------------------- 1 | package localmetrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | ) 6 | 7 | var ( 8 | MetricNodeWebhookBlockedReqeust = prometheus.NewCounterVec(prometheus.CounterOpts{ 9 | Name: "managed_webhook_node_blocked_request", 10 | Help: "Report how many times the managed node webhook has blocked requests", 11 | }, []string{"user"}) 12 | 13 | MetricsList = []prometheus.Collector{ 14 | MetricNodeWebhookBlockedReqeust, 15 | } 16 | ) 17 | 18 | func IncrementNodeWebhookBlockedRequest(user string) { 19 | MetricNodeWebhookBlockedReqeust.With(prometheus.Labels{"user": user}).Inc() 20 | } 21 | -------------------------------------------------------------------------------- /pkg/testutils/testutils.go: -------------------------------------------------------------------------------- 1 | package testutils 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "net/http" 7 | "net/http/httptest" 8 | 9 | admissionv1 
"k8s.io/api/admission/v1"
	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	responsehelper "github.com/openshift/managed-cluster-validating-webhooks/pkg/helpers"
	"github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils"
)

// Webhook is the minimal contract a webhook under test must satisfy.
type Webhook interface {
	// Authorized will determine if the request is allowed
	Authorized(request admissionctl.Request) admissionctl.Response
}

// CanCanNot helper to make English a bit nicer.
func CanCanNot(b bool) string {
	if b {
		return "can"
	}
	return "can not"
}

// CreateFakeRequestJSON will render the []byte slice needed for the (fake) HTTP request.
// Inputs into this are the request UID, which GVK and GVR are being gated by this webhook,
// User information (username and groups), what kind of operation is being gated by this webhook
// and finally the runtime.RawExtension representation of the request's Object or OldObject.
// The Object/OldObject is automatically inferred by the operation; delete operations will force OldObject.
// To create the RawExtension:
//
//	obj := runtime.RawExtension{
//		Raw: []byte(rawObjString),
//	}
//
// where rawObjString is a literal JSON blob, eg:
//
//	{
//	  "metadata": {
//	    "name": "namespace-name",
//	    "uid": "request-userid",
//	    "creationTimestamp": "2020-05-10T07:51:00Z"
//	  },
//	  "users": null
//	}
func CreateFakeRequestJSON(uid string,
	gvk metav1.GroupVersionKind, gvr metav1.GroupVersionResource,
	operation admissionv1.Operation,
	username string, userGroups []string, namespace string,
	obj, oldObject *runtime.RawExtension) ([]byte, error) {

	req := admissionv1.AdmissionReview{
		Request: &admissionv1.AdmissionRequest{
			UID:         types.UID(uid),
			Kind:        gvk,
			RequestKind: &gvk,
			Resource:    gvr,
			Operation:   operation,
			Namespace:   namespace,
			UserInfo: authenticationv1.UserInfo{
				Username: username,
				Groups:   userGroups,
			},
		},
	}
	// Populate Object/OldObject according to the operation being simulated,
	// mirroring how the API server fills an AdmissionRequest.
	switch operation {
	case admissionv1.Create:
		req.Request.Object = *obj
	case admissionv1.Update:
		// TODO (lisa): Update should have a different object for Object than for OldObject
		req.Request.Object = *obj
		if oldObject != nil {
			req.Request.OldObject = *oldObject
		} else {
			req.Request.OldObject = *obj
		}
	case admissionv1.Delete:
		req.Request.OldObject = *obj
	}
	// Return json.Marshal's result directly: idiomatic nil slice (not []byte{})
	// accompanies a non-nil error.
	return json.Marshal(req)
}

// CreateHTTPRequest takes all the information needed for an AdmissionReview.
// See also CreateFakeRequestJSON for more.
98 | func CreateHTTPRequest(uri, uid string, 99 | gvk metav1.GroupVersionKind, gvr metav1.GroupVersionResource, 100 | operation admissionv1.Operation, 101 | username string, userGroups []string, namespace string, 102 | obj, oldObject *runtime.RawExtension) (*http.Request, error) { 103 | req, err := CreateFakeRequestJSON(uid, gvk, gvr, operation, username, userGroups, namespace, obj, oldObject) 104 | if err != nil { 105 | return nil, err 106 | } 107 | buf := bytes.NewBuffer(req) 108 | httprequest := httptest.NewRequest("POST", uri, buf) 109 | httprequest.Header["Content-Type"] = []string{"application/json"} 110 | return httprequest, nil 111 | } 112 | 113 | // SendHTTPRequest will send the fake request to be handled by the Webhook 114 | func SendHTTPRequest(req *http.Request, s Webhook) (*admissionv1.AdmissionResponse, error) { 115 | httpResponse := httptest.NewRecorder() 116 | request, _, err := utils.ParseHTTPRequest(req) 117 | if err != nil { 118 | return nil, err 119 | } 120 | resp := s.Authorized(request) 121 | responsehelper.SendResponse(httpResponse, resp) 122 | // at this popint, httpResponse should contain the data sent in response to the webhook query, which is the success/fail 123 | ret := &admissionv1.AdmissionReview{} 124 | err = json.Unmarshal(httpResponse.Body.Bytes(), ret) 125 | if err != nil { 126 | return nil, err 127 | } 128 | return ret.Response, nil 129 | } 130 | -------------------------------------------------------------------------------- /pkg/webhooks/add_clusterlogging.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/clusterlogging" 5 | ) 6 | 7 | func init() { 8 | Register(clusterlogging.WebhookName, func() Webhook { return clusterlogging.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_clusterrole.go: 
-------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/clusterrole" 5 | ) 6 | 7 | func init() { 8 | Register(clusterrole.WebhookName, func() Webhook { return clusterrole.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_clusterrolebinding.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/clusterrolebinding" 5 | ) 6 | 7 | func init() { 8 | Register(clusterrolebinding.WebhookName, func() Webhook { return clusterrolebinding.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_customresourcedefinitions.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/customresourcedefinitions" 5 | ) 6 | 7 | func init() { 8 | Register(customresourcedefinitions.WebhookName, func() Webhook { return customresourcedefinitions.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hcpnamespace.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import webhooks "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hcpnamespace" 4 | 5 | func init() { 6 | Register(webhooks.WebhookName, func() Webhook { return webhooks.NewWebhook() }) 7 | } 8 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hiveownership.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import 
( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hiveownership" 5 | ) 6 | 7 | func init() { 8 | Register(hiveownership.WebhookName, func() Webhook { return hiveownership.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hostedcluster.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hostedcluster" 5 | ) 6 | 7 | func init() { 8 | Register(hostedcluster.WebhookName, func() Webhook { return hostedcluster.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_hostedcontrolplane.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/hostedcontrolplane" 5 | ) 6 | 7 | func init() { 8 | Register(hostedcontrolplane.WebhookName, func() Webhook { return hostedcontrolplane.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_imagecontentpolicies.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/imagecontentpolicies" 5 | ) 6 | 7 | func init() { 8 | Register(imagecontentpolicies.WebhookName, func() Webhook { return imagecontentpolicies.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_ingressconfig_hook.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/ingressconfig" 5 | ) 6 | 7 | func 
init() { 8 | Register(ingressconfig.WebhookName, func() Webhook { return ingressconfig.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_ingresscontroller.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/ingresscontroller" 5 | ) 6 | 7 | func init() { 8 | Register(ingresscontroller.WebhookName, func() Webhook { return ingresscontroller.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_manifestworks.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/manifestworks" 5 | ) 6 | 7 | func init() { 8 | Register(manifestworks.WebhookName, func() Webhook { return manifestworks.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_namespace_hook.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/namespace" 5 | ) 6 | 7 | func init() { 8 | Register(namespace.WebhookName, func() Webhook { return namespace.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_networkpolicy.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/networkpolicies" 5 | ) 6 | 7 | func init() { 8 | Register(networkpolicies.WebhookName, func() Webhook { return networkpolicies.NewWebhook() }) 9 | } 10 | 
-------------------------------------------------------------------------------- /pkg/webhooks/add_node.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/node" 4 | 5 | func init() { 6 | Register(node.WebhookName, func() Webhook { return node.NewWebhook() }) 7 | } 8 | -------------------------------------------------------------------------------- /pkg/webhooks/add_pod.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/pod" 5 | ) 6 | 7 | func init() { 8 | Register(pod.WebhookName, func() Webhook { return pod.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_podimagespec.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/podimagespec" 5 | ) 6 | 7 | func init() { 8 | Register(podimagespec.WebhookName, func() Webhook { return podimagespec.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_prometheusrule.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/prometheusrule" 5 | ) 6 | 7 | func init() { 8 | Register(prometheusrule.WebhookName, func() Webhook { return prometheusrule.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_regularuser.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | 
"github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/regularuser/common" 5 | ) 6 | 7 | func init() { 8 | Register(common.WebhookName, func() Webhook { return common.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_scc.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/scc" 5 | ) 6 | 7 | func init() { 8 | Register(scc.WebhookName, func() Webhook { return scc.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_sdnmigration.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/sdnmigration" 5 | ) 6 | 7 | func init() { 8 | Register(sdnmigration.WebhookName, func() Webhook { return sdnmigration.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_service_hook.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/service" 5 | ) 6 | 7 | func init() { 8 | Register(service.WebhookName, func() Webhook { return service.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/add_serviceaccount.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/serviceaccount" 5 | ) 6 | 7 | func init() { 8 | Register(serviceaccount.WebhookName, func() Webhook { return serviceaccount.NewWebhook() }) 9 | } 10 | 
-------------------------------------------------------------------------------- /pkg/webhooks/add_techpreviewnoupgrade.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/techpreviewnoupgrade" 5 | ) 6 | 7 | func init() { 8 | Register(techpreviewnoupgrade.WebhookName, func() Webhook { return techpreviewnoupgrade.NewWebhook() }) 9 | } 10 | -------------------------------------------------------------------------------- /pkg/webhooks/clusterlogging/clusterlogging_test.go: -------------------------------------------------------------------------------- 1 | package clusterlogging_test 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | admissionv1 "k8s.io/api/admission/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | 11 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 12 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/clusterlogging" 13 | ) 14 | 15 | type clusterloggingTestSuite struct { 16 | testName string 17 | testID string 18 | username string 19 | userGroups []string 20 | oldObject *runtime.RawExtension 21 | operation admissionv1.Operation 22 | appMaxAge string 23 | infraMaxAge string 24 | auditMaxAge string 25 | shouldBeAllowed bool 26 | } 27 | 28 | const testObjectRaw string = ` 29 | { 30 | "apiVersion": "logging.openshift.io/v1", 31 | "kind": "ClusterLogging", 32 | "metadata": { 33 | "name": "test-subject", 34 | "uid": "1234", 35 | "creationTimestamp": "2020-05-10T07:51:00Z", 36 | "labels": {} 37 | }, 38 | "spec": { 39 | "managementState": "Managed" , 40 | "logStore": { 41 | "type": "elasticsearch", 42 | "retentionPolicy": { 43 | "application": { 44 | "maxAge": "%s" 45 | }, 46 | "infra": { 47 | "maxAge": "%s" 48 | }, 49 | "audit": { 50 | "maxAge": "%s" 51 | } 52 | } 53 | } 54 | } 55 | }` 56 | 57 | func 
NewTestSuite(appMaxAge, infraMaxAge, auditMaxAge string) clusterloggingTestSuite { 58 | return clusterloggingTestSuite{ 59 | testID: "1234", 60 | operation: admissionv1.Create, 61 | appMaxAge: appMaxAge, 62 | infraMaxAge: infraMaxAge, 63 | auditMaxAge: auditMaxAge, 64 | shouldBeAllowed: true, 65 | } 66 | } 67 | 68 | func (s clusterloggingTestSuite) ExpectNotAllowed() clusterloggingTestSuite { 69 | s.shouldBeAllowed = false 70 | return s 71 | } 72 | 73 | func createOldObject(appMaxAge, infraMaxAge, auditMaxAge string) *runtime.RawExtension { 74 | return &runtime.RawExtension{ 75 | Raw: []byte(createRawJSONString(appMaxAge, infraMaxAge, auditMaxAge)), 76 | } 77 | } 78 | 79 | func createRawJSONString(appMaxAge, infraMaxAge, auditMaxAge string) string { 80 | s := fmt.Sprintf(testObjectRaw, appMaxAge, infraMaxAge, auditMaxAge) 81 | return s 82 | } 83 | 84 | func Test_InvalidTimeUnit(t *testing.T) { 85 | testSuites := []clusterloggingTestSuite{ 86 | NewTestSuite("7x", "1h", "1h").ExpectNotAllowed(), 87 | NewTestSuite("7D", "1h", "1h").ExpectNotAllowed(), 88 | NewTestSuite("7S", "1h", "1h").ExpectNotAllowed(), 89 | NewTestSuite("m", "1h", "1h").ExpectNotAllowed(), 90 | } 91 | 92 | runTests(t, testSuites) 93 | } 94 | 95 | func Test_RetentionPeriodNotAllowed(t *testing.T) { 96 | testSuites := []clusterloggingTestSuite{ 97 | NewTestSuite("8d", "1h", "1h").ExpectNotAllowed(), 98 | NewTestSuite("169h", "1h", "1h").ExpectNotAllowed(), 99 | NewTestSuite("1h", "1m", "1h").ExpectNotAllowed(), 100 | NewTestSuite("1h", "1s", "1h").ExpectNotAllowed(), 101 | NewTestSuite("1h", "1h", "8d").ExpectNotAllowed(), 102 | NewTestSuite("7M", "1h", "1h").ExpectNotAllowed(), 103 | NewTestSuite("7M", "0h", "1h").ExpectNotAllowed(), 104 | NewTestSuite("7M", "1h", "0h").ExpectNotAllowed(), 105 | NewTestSuite("7M", "61m", "0h").ExpectNotAllowed(), 106 | NewTestSuite("7M", "60m", "61m").ExpectNotAllowed(), 107 | NewTestSuite("59m", "60m", "60m").ExpectNotAllowed(), 108 | NewTestSuite("1h", "59m", 
"60m").ExpectNotAllowed(), 109 | NewTestSuite("1h", "60m", "59m").ExpectNotAllowed(), 110 | } 111 | 112 | runTests(t, testSuites) 113 | } 114 | 115 | func Test_RetentionPeriodAllowed(t *testing.T) { 116 | testSuites := []clusterloggingTestSuite{ 117 | NewTestSuite("7d", "1h", "1h"), 118 | NewTestSuite("168h", "1h", "1h"), 119 | NewTestSuite("168h", "60m", "60m"), 120 | NewTestSuite("1h", "1h", "1h"), 121 | } 122 | 123 | runTests(t, testSuites) 124 | } 125 | 126 | func runTests(t *testing.T, tests []clusterloggingTestSuite) { 127 | for _, test := range tests { 128 | obj := createOldObject(test.appMaxAge, test.infraMaxAge, test.auditMaxAge) 129 | hook := clusterlogging.NewWebhook() 130 | httprequest, err := testutils.CreateHTTPRequest(hook.GetURI(), 131 | test.testID, 132 | metav1.GroupVersionKind{}, metav1.GroupVersionResource{}, test.operation, test.username, test.userGroups, "", obj, test.oldObject) 133 | if err != nil { 134 | t.Fatalf("Expected no error, got %s", err.Error()) 135 | } 136 | 137 | response, err := testutils.SendHTTPRequest(httprequest, hook) 138 | if err != nil { 139 | t.Fatalf("Expected no error, got %s", err.Error()) 140 | } 141 | if response.UID == "" { 142 | t.Fatalf("No tracking UID associated with the response: %+v", response) 143 | } 144 | 145 | if response.Allowed != test.shouldBeAllowed { 146 | t.Fatalf("Mismatch: %v %s %s. Test's expectation is that the user %s. 
Reason: %s, Message: %v", 147 | test, 148 | testutils.CanCanNot(response.Allowed), string(test.operation), 149 | testutils.CanCanNot(test.shouldBeAllowed), response.Result.Reason, response.Result.Message) 150 | } 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /pkg/webhooks/clusterrole/clusterrole_test.go: -------------------------------------------------------------------------------- 1 | package clusterrole 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 7 | admissionv1 "k8s.io/api/admission/v1" 8 | authenticationv1 "k8s.io/api/authentication/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 12 | ) 13 | 14 | type ClusterRoleTestSuites struct { 15 | testID string 16 | targetClusterRole string 17 | username string 18 | operation admissionv1.Operation 19 | userGroups []string 20 | shouldBeAllowed bool 21 | } 22 | 23 | var ( 24 | // testClusterRoleJSON represents a minimal ClusterRole JSON object for testing 25 | testClusterRoleJSON = `{ 26 | "apiVersion": "rbac.authorization.k8s.io/v1", 27 | "kind": "ClusterRole", 28 | "metadata": { 29 | "name": "cluster-admin" 30 | }, 31 | "rules": [ 32 | { 33 | "apiGroups": ["*"], 34 | "resources": ["*"], 35 | "verbs": ["*"] 36 | } 37 | ] 38 | }` 39 | 40 | // testOtherClusterRoleJSON represents a non-protected ClusterRole 41 | testOtherClusterRoleJSON = `{ 42 | "apiVersion": "rbac.authorization.k8s.io/v1", 43 | "kind": "ClusterRole", 44 | "metadata": { 45 | "name": "some-custom-role" 46 | }, 47 | "rules": [ 48 | { 49 | "apiGroups": [""], 50 | "resources": ["pods"], 51 | "verbs": ["get", "list"] 52 | } 53 | ] 54 | }` 55 | ) 56 | 57 | func runClusterRoleTests(t *testing.T, tests []ClusterRoleTestSuites) { 58 | for _, test := range tests { 59 | gvk := metav1.GroupVersionKind{ 60 | Group: 
"rbac.authorization.k8s.io", 61 | Version: "v1", 62 | Kind: "ClusterRole", 63 | } 64 | gvr := metav1.GroupVersionResource{ 65 | Group: "rbac.authorization.k8s.io", 66 | Version: "v1", 67 | Resource: "clusterroles", 68 | } 69 | 70 | var clusterRoleJSON string 71 | if test.targetClusterRole == "cluster-admin" { 72 | clusterRoleJSON = testClusterRoleJSON 73 | } else { 74 | clusterRoleJSON = testOtherClusterRoleJSON 75 | } 76 | 77 | rawOldObject := []byte(clusterRoleJSON) 78 | req := admissionctl.Request{ 79 | AdmissionRequest: admissionv1.AdmissionRequest{ 80 | UID: "test-uid", 81 | Kind: gvk, 82 | Resource: gvr, 83 | Operation: admissionv1.Delete, 84 | UserInfo: authenticationv1.UserInfo{ 85 | Username: test.username, 86 | Groups: test.userGroups, 87 | }, 88 | OldObject: runtime.RawExtension{ 89 | Raw: rawOldObject, 90 | }, 91 | }, 92 | } 93 | 94 | hook := NewWebhook() 95 | response := hook.Authorized(req) 96 | 97 | if response.Allowed != test.shouldBeAllowed { 98 | t.Fatalf("Mismatch: %s (groups=%s) %s %s the clusterrole. 
Test's expectation is that the user %s", test.username, test.userGroups, testutils.CanCanNot(response.Allowed), test.operation, testutils.CanCanNot(test.shouldBeAllowed)) 99 | } 100 | } 101 | } 102 | 103 | func TestClusterRoleDeletionNegative(t *testing.T) { 104 | tests := []ClusterRoleTestSuites{ 105 | { 106 | testID: "regular-user-deny", 107 | username: "test-user", 108 | userGroups: []string{"system:authenticated"}, 109 | operation: admissionv1.Delete, 110 | shouldBeAllowed: false, 111 | targetClusterRole: "cluster-admin", 112 | }, 113 | { 114 | testID: "cluster-admin-user-deny", 115 | username: "cluster-admin", 116 | userGroups: []string{"system:authenticated"}, 117 | operation: admissionv1.Delete, 118 | shouldBeAllowed: false, 119 | targetClusterRole: "cluster-admin", 120 | }, 121 | { 122 | testID: "customer-admin-deny", 123 | username: "customer-user", 124 | userGroups: []string{"system:authenticated", "customer-admin"}, 125 | operation: admissionv1.Delete, 126 | shouldBeAllowed: false, 127 | targetClusterRole: "cluster-admin", 128 | }, 129 | } 130 | 131 | runClusterRoleTests(t, tests) 132 | } 133 | 134 | func TestClusterRoleDeletionPositive(t *testing.T) { 135 | tests := []ClusterRoleTestSuites{ 136 | { 137 | testID: "backplane-admin-allow", 138 | username: "backplane-cluster-admin", 139 | userGroups: []string{"system:authenticated"}, 140 | operation: admissionv1.Delete, 141 | shouldBeAllowed: true, 142 | targetClusterRole: "cluster-admin", 143 | }, 144 | { 145 | testID: "backplane-srep-allow", 146 | username: "test-user", 147 | userGroups: []string{"system:authenticated", "system:serviceaccounts:openshift-backplane-srep"}, 148 | operation: admissionv1.Delete, 149 | shouldBeAllowed: true, 150 | targetClusterRole: "cluster-admin", 151 | }, 152 | { 153 | testID: "other-role-allow", 154 | username: "regular-user", 155 | userGroups: []string{"system:authenticated"}, 156 | operation: admissionv1.Delete, 157 | shouldBeAllowed: true, 158 | targetClusterRole: 
"some-custom-role", 159 | }, 160 | { 161 | testID: "system-user-allow", 162 | username: "system:kube-controller-manager", 163 | userGroups: []string{"system:authenticated"}, 164 | operation: admissionv1.Delete, 165 | shouldBeAllowed: true, 166 | targetClusterRole: "cluster-admin", 167 | }, 168 | } 169 | 170 | runClusterRoleTests(t, tests) 171 | } 172 | -------------------------------------------------------------------------------- /pkg/webhooks/hcpnamespace/hcpnamespace_test.go: -------------------------------------------------------------------------------- 1 | package hcpnamespace 2 | 3 | import ( 4 | "testing" 5 | 6 | admissionv1 "k8s.io/api/admission/v1" 7 | authenticationv1 "k8s.io/api/authentication/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 10 | ) 11 | 12 | func TestAuthorized(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | username string 16 | namespace string 17 | operation admissionv1.Operation 18 | shouldBeAllowed bool 19 | }{ 20 | { 21 | name: "Allowed user can delete protected namespace", 22 | username: "system:admin", 23 | namespace: "ocm-staging-test", 24 | operation: admissionv1.Delete, 25 | shouldBeAllowed: true, 26 | }, 27 | { 28 | name: "Klusterlet SA can delete protected namespace", 29 | username: "system:serviceaccount:open-cluster-management-agent:klusterlet-work-sa", 30 | namespace: "klusterlet-test", 31 | operation: admissionv1.Delete, 32 | shouldBeAllowed: true, 33 | }, 34 | { 35 | name: "Hypershift operator can delete protected namespace", 36 | username: "system:serviceaccount:hypershift:operator", 37 | namespace: "hs-mc-test", 38 | operation: admissionv1.Delete, 39 | shouldBeAllowed: true, 40 | }, 41 | { 42 | name: "Random user cannot delete protected namespace", 43 | username: "unknown-user", 44 | namespace: "ocm-staging-test", 45 | operation: admissionv1.Delete, 46 | shouldBeAllowed: false, 47 | }, 48 | { 49 | name: "Random user can delete 
unprotected namespace", 50 | username: "unknown-user", 51 | namespace: "test-namespace", 52 | operation: admissionv1.Delete, 53 | shouldBeAllowed: true, 54 | }, 55 | { 56 | name: "Non-DELETE operation should be allowed on protected namespace", 57 | username: "unknown-user", 58 | namespace: "ocm-production-test", 59 | operation: admissionv1.Update, 60 | shouldBeAllowed: true, 61 | }, 62 | { 63 | name: "Klusterlet SA can delete protected namespace", 64 | username: "system:serviceaccount:open-cluster-management-agent:klusterlet", 65 | namespace: "ocm-integration-test", 66 | operation: admissionv1.Delete, 67 | shouldBeAllowed: true, 68 | }, 69 | } 70 | 71 | for _, test := range tests { 72 | t.Run(test.name, func(t *testing.T) { 73 | webhook := NewWebhook() 74 | request := admissionctl.Request{ 75 | AdmissionRequest: admissionv1.AdmissionRequest{ 76 | Name: test.namespace, 77 | UserInfo: authenticationv1.UserInfo{ 78 | Username: test.username, 79 | }, 80 | Operation: test.operation, 81 | }, 82 | } 83 | 84 | response := webhook.Authorized(request) 85 | if response.Allowed != test.shouldBeAllowed { 86 | t.Errorf("Unexpected response. 
Got %v, expected %v", response.Allowed, test.shouldBeAllowed) 87 | } 88 | }) 89 | } 90 | } 91 | 92 | func TestName(t *testing.T) { 93 | webhook := NewWebhook() 94 | if webhook.Name() != WebhookName { 95 | t.Errorf("Expected webhook name to be %s, got %s", WebhookName, webhook.Name()) 96 | } 97 | } 98 | 99 | func TestGetURI(t *testing.T) { 100 | webhook := NewWebhook() 101 | uri := webhook.GetURI() 102 | if uri[0] != '/' { 103 | t.Errorf("Expected URI to start with '/', got %s", uri) 104 | } 105 | if uri != "/hcpnamespace-validation" { 106 | t.Errorf("Expected URI to be /hcpnamespace-validation, got %s", uri) 107 | } 108 | } 109 | 110 | func TestRules(t *testing.T) { 111 | webhook := NewWebhook() 112 | rules := webhook.Rules() 113 | if len(rules) == 0 { 114 | t.Fatal("Expected at least one rule") 115 | } 116 | } 117 | 118 | func TestDoc(t *testing.T) { 119 | webhook := NewWebhook() 120 | doc := webhook.Doc() 121 | if doc == "" { 122 | t.Error("Expected non-empty documentation string") 123 | } 124 | } 125 | 126 | func TestTimeoutSeconds(t *testing.T) { 127 | webhook := NewWebhook() 128 | timeout := webhook.TimeoutSeconds() 129 | if timeout != 2 { 130 | t.Errorf("Expected timeout to be 2, got %d", timeout) 131 | } 132 | } 133 | 134 | func TestValidate(t *testing.T) { 135 | tests := []struct { 136 | name string 137 | request admissionctl.Request 138 | expected bool 139 | }{ 140 | { 141 | name: "Valid request", 142 | request: admissionctl.Request{ 143 | AdmissionRequest: admissionv1.AdmissionRequest{ 144 | UserInfo: authenticationv1.UserInfo{ 145 | Username: "test-user", 146 | }, 147 | Kind: metav1.GroupVersionKind{ 148 | Kind: "Namespace", 149 | }, 150 | }, 151 | }, 152 | expected: true, 153 | }, 154 | { 155 | name: "Invalid request without username", 156 | request: admissionctl.Request{ 157 | AdmissionRequest: admissionv1.AdmissionRequest{ 158 | UserInfo: authenticationv1.UserInfo{ 159 | Username: "", 160 | }, 161 | Kind: metav1.GroupVersionKind{ 162 | Kind: 
"Namespace", 163 | }, 164 | }, 165 | }, 166 | expected: false, 167 | }, 168 | { 169 | name: "Invalid request with wrong kind", 170 | request: admissionctl.Request{ 171 | AdmissionRequest: admissionv1.AdmissionRequest{ 172 | UserInfo: authenticationv1.UserInfo{ 173 | Username: "test-user", 174 | }, 175 | Kind: metav1.GroupVersionKind{ 176 | Kind: "Pod", 177 | }, 178 | }, 179 | }, 180 | expected: false, 181 | }, 182 | } 183 | 184 | for _, test := range tests { 185 | t.Run(test.name, func(t *testing.T) { 186 | webhook := NewWebhook() 187 | result := webhook.Validate(test.request) 188 | if result != test.expected { 189 | t.Errorf("Expected %v, got %v", test.expected, result) 190 | } 191 | }) 192 | } 193 | } 194 | -------------------------------------------------------------------------------- /pkg/webhooks/hiveownership/hiveownership.go: -------------------------------------------------------------------------------- 1 | package hiveownership 2 | 3 | import ( 4 | "os" 5 | "slices" 6 | "sync" 7 | 8 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 9 | admissionregv1 "k8s.io/api/admissionregistration/v1" 10 | admissionv1 "k8s.io/api/apps/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | logf "sigs.k8s.io/controller-runtime/pkg/log" 14 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | ) 16 | 17 | // const 18 | const ( 19 | WebhookName string = "hiveownership-validation" 20 | docString string = `Managed OpenShift customers may not edit certain managed resources. 
A managed resource has a "hive.openshift.io/managed": "true" label.` 21 | ) 22 | 23 | // HiveOwnershipWebhook denies requests 24 | // if it made by a customer to manage hive-labeled resources 25 | type HiveOwnershipWebhook struct { 26 | mu sync.Mutex 27 | s runtime.Scheme 28 | } 29 | 30 | var ( 31 | privilegedUsers = []string{"kube:admin", "system:admin", "system:serviceaccount:kube-system:generic-garbage-collector", "backplane-cluster-admin"} 32 | adminGroups = []string{"system:serviceaccounts:openshift-backplane-srep"} 33 | 34 | log = logf.Log.WithName(WebhookName) 35 | 36 | scope = admissionregv1.ClusterScope 37 | rules = []admissionregv1.RuleWithOperations{ 38 | { 39 | Operations: []admissionregv1.OperationType{"UPDATE", "DELETE"}, 40 | Rule: admissionregv1.Rule{ 41 | APIGroups: []string{"quota.openshift.io"}, 42 | APIVersions: []string{"*"}, 43 | Resources: []string{"clusterresourcequotas"}, 44 | Scope: &scope, 45 | }, 46 | }, 47 | } 48 | ) 49 | 50 | // TimeoutSeconds implements Webhook interface 51 | func (s *HiveOwnershipWebhook) TimeoutSeconds() int32 { return 2 } 52 | 53 | // MatchPolicy implements Webhook interface 54 | func (s *HiveOwnershipWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 55 | return admissionregv1.Equivalent 56 | } 57 | 58 | // Name implements Webhook interface 59 | func (s *HiveOwnershipWebhook) Name() string { return WebhookName } 60 | 61 | // FailurePolicy implements Webhook interface 62 | func (s *HiveOwnershipWebhook) FailurePolicy() admissionregv1.FailurePolicyType { 63 | return admissionregv1.Ignore 64 | } 65 | 66 | // Rules implements Webhook interface 67 | func (s *HiveOwnershipWebhook) Rules() []admissionregv1.RuleWithOperations { return rules } 68 | 69 | // GetURI implements Webhook interface 70 | func (s *HiveOwnershipWebhook) GetURI() string { return "/" + WebhookName } 71 | 72 | // SideEffects implements Webhook interface 73 | func (s *HiveOwnershipWebhook) SideEffects() admissionregv1.SideEffectClass { 74 | return 
admissionregv1.SideEffectClassNone 75 | } 76 | 77 | // Validate is the incoming request even valid? 78 | func (s *HiveOwnershipWebhook) Validate(req admissionctl.Request) bool { 79 | valid := true 80 | valid = valid && (req.UserInfo.Username != "") 81 | 82 | return valid 83 | } 84 | 85 | // Doc documents the hook 86 | func (s *HiveOwnershipWebhook) Doc() string { 87 | return docString 88 | } 89 | 90 | // ObjectSelector intercepts based on having the label 91 | // .metadata.labels["hive.openshift.io/managed"] == "true" 92 | func (s *HiveOwnershipWebhook) ObjectSelector() *metav1.LabelSelector { 93 | return &metav1.LabelSelector{ 94 | MatchLabels: map[string]string{ 95 | "hive.openshift.io/managed": "true", 96 | }, 97 | } 98 | } 99 | 100 | func (s *HiveOwnershipWebhook) authorized(request admissionctl.Request) admissionctl.Response { 101 | var ret admissionctl.Response 102 | 103 | // Admin users 104 | if slices.Contains(privilegedUsers, request.AdmissionRequest.UserInfo.Username) { 105 | ret = admissionctl.Allowed("Admin users may edit managed resources") 106 | ret.UID = request.AdmissionRequest.UID 107 | return ret 108 | } 109 | // Users in admin groups 110 | for _, group := range request.AdmissionRequest.UserInfo.Groups { 111 | if slices.Contains(adminGroups, group) { 112 | ret = admissionctl.Allowed("Members of admin group may edit managed resources") 113 | ret.UID = request.AdmissionRequest.UID 114 | return ret 115 | } 116 | } 117 | 118 | ret = admissionctl.Denied("Prevented from accessing Red Hat managed resources. This is in an effort to prevent harmful actions that may cause unintended consequences or affect the stability of the cluster. 
If you have any questions about this, please reach out to Red Hat support at https://access.redhat.com/support") 119 | ret.UID = request.AdmissionRequest.UID 120 | return ret 121 | } 122 | 123 | // Authorized implements Webhook interface 124 | func (s *HiveOwnershipWebhook) Authorized(request admissionctl.Request) admissionctl.Response { 125 | return s.authorized(request) 126 | } 127 | 128 | // CustomSelector implements Webhook interface, returning the custom label selector for the syncset, if any 129 | func (s *HiveOwnershipWebhook) SyncSetLabelSelector() metav1.LabelSelector { 130 | return utils.DefaultLabelSelector() 131 | } 132 | 133 | func (s *HiveOwnershipWebhook) ClassicEnabled() bool { return true } 134 | 135 | func (s *HiveOwnershipWebhook) HypershiftEnabled() bool { return false } 136 | 137 | // NewWebhook creates a new webhook 138 | func NewWebhook() *HiveOwnershipWebhook { 139 | scheme := runtime.NewScheme() 140 | err := admissionv1.AddToScheme(scheme) 141 | if err != nil { 142 | log.Error(err, "Fail adding admissionsv1 scheme to HiveOwnershipWebhook") 143 | os.Exit(1) 144 | } 145 | 146 | return &HiveOwnershipWebhook{ 147 | s: *scheme, 148 | } 149 | } 150 | -------------------------------------------------------------------------------- /pkg/webhooks/hiveownership/hiveownership_test.go: -------------------------------------------------------------------------------- 1 | package hiveownership 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "reflect" 7 | "testing" 8 | 9 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 10 | 11 | admissionv1 "k8s.io/api/admission/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | ) 15 | 16 | type hiveOwnershipTestSuites struct { 17 | testName string 18 | testID string 19 | username string 20 | userGroups []string 21 | oldObject *runtime.RawExtension 22 | operation admissionv1.Operation 23 | labels map[string]string 24 | shouldBeAllowed bool 25 | } 
26 | 27 | const testObjectRaw string = `{ 28 | "metadata": { 29 | "name": "%s", 30 | "uid": "%s", 31 | "creationTimestamp": "2020-05-10T07:51:00Z", 32 | "labels": %s 33 | }, 34 | "users": null 35 | }` 36 | 37 | // labelsMapToString is a helper to turn a map into a JSON fragment to be 38 | // inserted into the testNamespaceRaw const. See createRawJSONString. 39 | func labelsMapToString(labels map[string]string) string { 40 | ret, _ := json.Marshal(labels) 41 | return string(ret) 42 | } 43 | 44 | func createRawJSONString(name, uid string, labels map[string]string) string { 45 | return fmt.Sprintf(testObjectRaw, name, uid, labelsMapToString(labels)) 46 | } 47 | func createOldObject(name, uid string, labels map[string]string) *runtime.RawExtension { 48 | return &runtime.RawExtension{ 49 | Raw: []byte(createRawJSONString(name, uid, labels)), 50 | } 51 | } 52 | 53 | func runTests(t *testing.T, tests []hiveOwnershipTestSuites) { 54 | gvk := metav1.GroupVersionKind{ 55 | Group: "quota.openshift.io", 56 | Version: "v1", 57 | Kind: "ClusterResourceQuota", 58 | } 59 | gvr := metav1.GroupVersionResource{ 60 | Group: "quota.openshift.io", 61 | Version: "v1", 62 | Resource: "clusterresourcequotas", 63 | } 64 | 65 | for _, test := range tests { 66 | obj := createOldObject(test.testName, test.testID, test.labels) 67 | hook := NewWebhook() 68 | httprequest, err := testutils.CreateHTTPRequest(hook.GetURI(), 69 | test.testID, 70 | gvk, gvr, test.operation, test.username, test.userGroups, "", obj, test.oldObject) 71 | if err != nil { 72 | t.Fatalf("Expected no error, got %s", err.Error()) 73 | } 74 | 75 | response, err := testutils.SendHTTPRequest(httprequest, hook) 76 | if err != nil { 77 | t.Fatalf("Expected no error, got %s", err.Error()) 78 | } 79 | if response.UID == "" { 80 | t.Fatalf("No tracking UID associated with the response: %+v", response) 81 | } 82 | 83 | if response.Allowed != test.shouldBeAllowed { 84 | t.Fatalf("Mismatch: %s (groups=%s) %s %s. 
Test's expectation is that the user %s", 85 | test.username, test.userGroups, 86 | testutils.CanCanNot(response.Allowed), string(test.operation), 87 | testutils.CanCanNot(test.shouldBeAllowed)) 88 | } 89 | } 90 | } 91 | 92 | func TestThing(t *testing.T) { 93 | tests := []hiveOwnershipTestSuites{ 94 | { 95 | testID: "kube-admin-test", 96 | username: "kube:admin", 97 | userGroups: []string{"kube:system", "system:authenticated", "system:authenticated:oauth"}, 98 | operation: admissionv1.Create, 99 | shouldBeAllowed: true, 100 | }, 101 | { 102 | testID: "kube-admin-test", 103 | username: "backplane-cluster-admin", 104 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 105 | operation: admissionv1.Create, 106 | shouldBeAllowed: true, 107 | }, 108 | { 109 | testID: "sre-test", 110 | username: "sre-foo@redhat.com", 111 | userGroups: []string{adminGroups[0], "system:authenticated", "system:authenticated:oauth"}, 112 | operation: admissionv1.Update, 113 | shouldBeAllowed: true, 114 | }, 115 | { 116 | // dedicated-admin users. This should be blocked as making changes as CU on clusterresourcequota which are managed are prohibited. 117 | testID: "dedicated-admin-test", 118 | username: "bob@foo.com", 119 | userGroups: []string{"dedicated-admins", "system:authenticated", "system:authenticated:oauth"}, 120 | operation: admissionv1.Update, 121 | labels: map[string]string{"hive.openshift.io/managed": "true"}, 122 | shouldBeAllowed: false, 123 | }, 124 | { 125 | // no special privileges, only an authenticated user. This should be blocked as making changes on clusterresourcequota which are managed are prohibited. 
126 | testID: "unpriv-update-test", 127 | username: "unpriv-user", 128 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 129 | operation: admissionv1.Update, 130 | labels: map[string]string{"hive.openshift.io/managed": "true"}, 131 | shouldBeAllowed: false, 132 | }, 133 | } 134 | runTests(t, tests) 135 | } 136 | 137 | func TestBadRequests(t *testing.T) { 138 | t.Skip() 139 | } 140 | 141 | func TestName(t *testing.T) { 142 | if NewWebhook().Name() == "" { 143 | t.Fatalf("Empty hook name") 144 | } 145 | } 146 | 147 | func TestRules(t *testing.T) { 148 | if len(NewWebhook().Rules()) == 0 { 149 | t.Log("No rules for this webhook?") 150 | } 151 | } 152 | 153 | func TestGetURI(t *testing.T) { 154 | if NewWebhook().GetURI()[0] != '/' { 155 | t.Fatalf("Hook URI does not begin with a /") 156 | } 157 | } 158 | 159 | func TestObjectSelector(t *testing.T) { 160 | obj := &metav1.LabelSelector{ 161 | MatchLabels: map[string]string{ 162 | "hive.openshift.io/managed": "true", 163 | }, 164 | } 165 | 166 | if !reflect.DeepEqual(NewWebhook().ObjectSelector(), obj) { 167 | t.Fatalf("hive managed resources label name is not correct.") 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /pkg/webhooks/hostedcluster/hostedcluster.go: -------------------------------------------------------------------------------- 1 | package hostedcluster 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "sync" 7 | 8 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 9 | admissionv1 "k8s.io/api/admission/v1" 10 | admissionregv1 "k8s.io/api/admissionregistration/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | logf "sigs.k8s.io/controller-runtime/pkg/log" 14 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | ) 16 | 17 | const ( 18 | WebhookName string = "hostedcluster-validation" 19 | docString string = "Validates HostedCluster deletion 
operations are only performed by authorized service accounts" 20 | ) 21 | 22 | var ( 23 | // Only this service account is allowed to delete HostedClusters 24 | allowedServiceAccount = "system:serviceaccount:open-cluster-management-agent:klusterlet-work-sa" 25 | 26 | scope = admissionregv1.NamespacedScope 27 | rules = []admissionregv1.RuleWithOperations{ 28 | { 29 | Operations: []admissionregv1.OperationType{"DELETE"}, 30 | Rule: admissionregv1.Rule{ 31 | APIGroups: []string{"hypershift.openshift.io"}, 32 | APIVersions: []string{"*"}, 33 | Resources: []string{"hostedclusters"}, 34 | Scope: &scope, 35 | }, 36 | }, 37 | } 38 | log = logf.Log.WithName(WebhookName) 39 | ) 40 | 41 | // HostedClusterWebhook validates HostedCluster deletion operations 42 | type HostedClusterWebhook struct { 43 | mu sync.Mutex 44 | s runtime.Scheme 45 | } 46 | 47 | // ObjectSelector implements Webhook interface 48 | func (s *HostedClusterWebhook) ObjectSelector() *metav1.LabelSelector { return nil } 49 | 50 | func (s *HostedClusterWebhook) Doc() string { 51 | return fmt.Sprintf(docString) 52 | } 53 | 54 | // TimeoutSeconds implements Webhook interface 55 | func (s *HostedClusterWebhook) TimeoutSeconds() int32 { return 2 } 56 | 57 | // MatchPolicy implements Webhook interface 58 | func (s *HostedClusterWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 59 | return admissionregv1.Equivalent 60 | } 61 | 62 | // Name implements Webhook interface 63 | func (s *HostedClusterWebhook) Name() string { return WebhookName } 64 | 65 | // FailurePolicy implements Webhook interface 66 | func (s *HostedClusterWebhook) FailurePolicy() admissionregv1.FailurePolicyType { 67 | return admissionregv1.Ignore 68 | } 69 | 70 | // Rules implements Webhook interface 71 | func (s *HostedClusterWebhook) Rules() []admissionregv1.RuleWithOperations { return rules } 72 | 73 | // GetURI implements Webhook interface 74 | func (s *HostedClusterWebhook) GetURI() string { return "/hostedcluster-validation" } 75 | 76 | // 
SideEffects implements Webhook interface 77 | func (s *HostedClusterWebhook) SideEffects() admissionregv1.SideEffectClass { 78 | return admissionregv1.SideEffectClassNone 79 | } 80 | 81 | // Validate - Make sure we're working with a well-formed Admission Request object 82 | func (s *HostedClusterWebhook) Validate(req admissionctl.Request) bool { 83 | valid := true 84 | valid = valid && (req.UserInfo.Username != "") 85 | valid = valid && (req.Kind.Kind == "HostedCluster") 86 | valid = valid && (req.Kind.Group == "hypershift.openshift.io") 87 | 88 | return valid 89 | } 90 | 91 | // Authorized implements Webhook interface 92 | func (s *HostedClusterWebhook) Authorized(request admissionctl.Request) admissionctl.Response { 93 | return s.authorized(request) 94 | } 95 | 96 | // Is the request authorized 97 | func (s *HostedClusterWebhook) authorized(request admissionctl.Request) admissionctl.Response { 98 | var ret admissionctl.Response 99 | 100 | // Only allow DELETE operations from the specified service account 101 | if request.UserInfo.Username == allowedServiceAccount { 102 | ret = admissionctl.Allowed("Service account is authorized to delete HostedCluster resources") 103 | ret.UID = request.AdmissionRequest.UID 104 | return ret 105 | } 106 | 107 | // If not a delete operation, allow it 108 | if request.Operation != admissionv1.Delete { 109 | ret = admissionctl.Allowed("Only DELETE operations are restricted") 110 | ret.UID = request.AdmissionRequest.UID 111 | return ret 112 | } 113 | 114 | // Deny all other requests 115 | log.Info("Unauthorized attempt to delete HostedCluster", 116 | "user", request.UserInfo.Username, 117 | "groups", request.UserInfo.Groups) 118 | 119 | ret = admissionctl.Denied(fmt.Sprintf("Only %s is authorized to delete HostedCluster resources", allowedServiceAccount)) 120 | ret.UID = request.AdmissionRequest.UID 121 | return ret 122 | } 123 | 124 | // SyncSetLabelSelector returns the label selector to use in the SyncSet. 
125 | func (s *HostedClusterWebhook) SyncSetLabelSelector() metav1.LabelSelector { 126 | customLabelSelector := utils.DefaultLabelSelector() 127 | customLabelSelector.MatchExpressions = append(customLabelSelector.MatchExpressions, 128 | metav1.LabelSelectorRequirement{ 129 | Key: "ext-hypershift.openshift.io/cluster-type", 130 | Operator: metav1.LabelSelectorOpIn, 131 | Values: []string{"management-cluster"}, 132 | }) 133 | return customLabelSelector 134 | } 135 | 136 | func (s *HostedClusterWebhook) ClassicEnabled() bool { return true } 137 | 138 | func (s *HostedClusterWebhook) HypershiftEnabled() bool { return false } 139 | 140 | // NewWebhook creates a new webhook 141 | func NewWebhook() *HostedClusterWebhook { 142 | scheme := runtime.NewScheme() 143 | err := admissionv1.AddToScheme(scheme) 144 | if err != nil { 145 | log.Error(err, "Fail adding admissionsv1 scheme to HostedClusterWebhook") 146 | os.Exit(1) 147 | } 148 | return &HostedClusterWebhook{ 149 | s: *scheme, 150 | } 151 | } 152 | -------------------------------------------------------------------------------- /pkg/webhooks/hostedcluster/hostedcluster_test.go: -------------------------------------------------------------------------------- 1 | package hostedcluster 2 | 3 | import ( 4 | "testing" 5 | 6 | admissionv1 "k8s.io/api/admission/v1" 7 | authenticationv1 "k8s.io/api/authentication/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 10 | ) 11 | 12 | func TestHostedClusterAuthorized(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | username string 16 | operation admissionv1.Operation 17 | shouldBeAllowed bool 18 | }{ 19 | { 20 | name: "Allowed service account can delete hostedcluster", 21 | username: "system:serviceaccount:open-cluster-management-agent:klusterlet-work-sa", 22 | operation: admissionv1.Delete, 23 | shouldBeAllowed: true, 24 | }, 25 | { 26 | name: "Random user cannot delete hostedcluster", 27 | 
username: "unknown-user", 28 | operation: admissionv1.Delete, 29 | shouldBeAllowed: false, 30 | }, 31 | { 32 | name: "Non-DELETE operation should be allowed", 33 | username: "unknown-user", 34 | operation: admissionv1.Create, 35 | shouldBeAllowed: true, 36 | }, 37 | } 38 | 39 | for _, test := range tests { 40 | t.Run(test.name, func(t *testing.T) { 41 | webhook := NewWebhook() 42 | request := admissionctl.Request{ 43 | AdmissionRequest: admissionv1.AdmissionRequest{ 44 | UserInfo: authenticationv1.UserInfo{ 45 | Username: test.username, 46 | }, 47 | Operation: test.operation, 48 | Kind: metav1.GroupVersionKind{ 49 | Group: "hypershift.openshift.io", 50 | Kind: "HostedCluster", 51 | }, 52 | }, 53 | } 54 | 55 | response := webhook.Authorized(request) 56 | if response.Allowed != test.shouldBeAllowed { 57 | t.Errorf("Unexpected response for %s. Got %v, expected %v", test.name, response.Allowed, test.shouldBeAllowed) 58 | } 59 | }) 60 | } 61 | } 62 | 63 | func TestName(t *testing.T) { 64 | webhook := NewWebhook() 65 | if webhook.Name() != WebhookName { 66 | t.Errorf("Expected webhook name to be %s, got %s", WebhookName, webhook.Name()) 67 | } 68 | } 69 | 70 | func TestGetURI(t *testing.T) { 71 | webhook := NewWebhook() 72 | uri := webhook.GetURI() 73 | if uri[0] != '/' { 74 | t.Errorf("Expected URI to start with '/', got %s", uri) 75 | } 76 | if uri != "/hostedcluster-validation" { 77 | t.Errorf("Expected URI to be /hostedcluster-validation, got %s", uri) 78 | } 79 | } 80 | 81 | func TestRules(t *testing.T) { 82 | webhook := NewWebhook() 83 | rules := webhook.Rules() 84 | if len(rules) == 0 { 85 | t.Fatal("Expected at least one rule") 86 | } 87 | } 88 | 89 | func TestDoc(t *testing.T) { 90 | webhook := NewWebhook() 91 | doc := webhook.Doc() 92 | if doc == "" { 93 | t.Error("Expected non-empty documentation string") 94 | } 95 | } 96 | 97 | func TestTimeoutSeconds(t *testing.T) { 98 | webhook := NewWebhook() 99 | timeout := webhook.TimeoutSeconds() 100 | if timeout != 2 { 
101 | t.Errorf("Expected timeout to be 2, got %d", timeout) 102 | } 103 | } 104 | 105 | func TestValidate(t *testing.T) { 106 | tests := []struct { 107 | name string 108 | request admissionctl.Request 109 | expected bool 110 | }{ 111 | { 112 | name: "Valid request", 113 | request: admissionctl.Request{ 114 | AdmissionRequest: admissionv1.AdmissionRequest{ 115 | UserInfo: authenticationv1.UserInfo{ 116 | Username: "test-user", 117 | }, 118 | Kind: metav1.GroupVersionKind{ 119 | Group: "hypershift.openshift.io", 120 | Kind: "HostedCluster", 121 | }, 122 | }, 123 | }, 124 | expected: true, 125 | }, 126 | { 127 | name: "Invalid request without username", 128 | request: admissionctl.Request{ 129 | AdmissionRequest: admissionv1.AdmissionRequest{ 130 | UserInfo: authenticationv1.UserInfo{ 131 | Username: "", 132 | }, 133 | Kind: metav1.GroupVersionKind{ 134 | Group: "hypershift.openshift.io", 135 | Kind: "HostedCluster", 136 | }, 137 | }, 138 | }, 139 | expected: false, 140 | }, 141 | { 142 | name: "Invalid request with wrong kind", 143 | request: admissionctl.Request{ 144 | AdmissionRequest: admissionv1.AdmissionRequest{ 145 | UserInfo: authenticationv1.UserInfo{ 146 | Username: "test-user", 147 | }, 148 | Kind: metav1.GroupVersionKind{ 149 | Group: "hypershift.openshift.io", 150 | Kind: "Pod", 151 | }, 152 | }, 153 | }, 154 | expected: false, 155 | }, 156 | { 157 | name: "Invalid request with wrong group", 158 | request: admissionctl.Request{ 159 | AdmissionRequest: admissionv1.AdmissionRequest{ 160 | UserInfo: authenticationv1.UserInfo{ 161 | Username: "test-user", 162 | }, 163 | Kind: metav1.GroupVersionKind{ 164 | Group: "apps", 165 | Kind: "HostedCluster", 166 | }, 167 | }, 168 | }, 169 | expected: false, 170 | }, 171 | } 172 | 173 | for _, test := range tests { 174 | t.Run(test.name, func(t *testing.T) { 175 | webhook := NewWebhook() 176 | result := webhook.Validate(test.request) 177 | if result != test.expected { 178 | t.Errorf("Expected %v, got %v", 
test.expected, result) 179 | } 180 | }) 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /pkg/webhooks/hostedcontrolplane/hostedcontrolplane_test.go: -------------------------------------------------------------------------------- 1 | package hostedcontrolplane 2 | 3 | import ( 4 | "testing" 5 | 6 | admissionv1 "k8s.io/api/admission/v1" 7 | authenticationv1 "k8s.io/api/authentication/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 10 | ) 11 | 12 | func TestHostedControlPlaneAuthorized(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | username string 16 | operation admissionv1.Operation 17 | shouldBeAllowed bool 18 | }{ 19 | { 20 | name: "Allowed service account can delete hostedcontrolplane", 21 | username: "system:serviceaccount:open-cluster-management-agent:klusterlet-work-sa", 22 | operation: admissionv1.Delete, 23 | shouldBeAllowed: true, 24 | }, 25 | { 26 | name: "Random user cannot delete hostedcontrolplane", 27 | username: "unknown-user", 28 | operation: admissionv1.Delete, 29 | shouldBeAllowed: false, 30 | }, 31 | { 32 | name: "Non-DELETE operation should be allowed", 33 | username: "unknown-user", 34 | operation: admissionv1.Update, 35 | shouldBeAllowed: true, 36 | }, 37 | } 38 | 39 | for _, test := range tests { 40 | t.Run(test.name, func(t *testing.T) { 41 | webhook := NewWebhook() 42 | request := admissionctl.Request{ 43 | AdmissionRequest: admissionv1.AdmissionRequest{ 44 | UserInfo: authenticationv1.UserInfo{ 45 | Username: test.username, 46 | }, 47 | Operation: test.operation, 48 | Kind: metav1.GroupVersionKind{ 49 | Group: "hypershift.openshift.io", 50 | Kind: "HostedControlPlane", 51 | }, 52 | }, 53 | } 54 | 55 | response := webhook.Authorized(request) 56 | if response.Allowed != test.shouldBeAllowed { 57 | t.Errorf("Unexpected response for %s. 
Got %v, expected %v", test.name, response.Allowed, test.shouldBeAllowed) 58 | } 59 | }) 60 | } 61 | } 62 | 63 | func TestName(t *testing.T) { 64 | webhook := NewWebhook() 65 | if webhook.Name() != WebhookName { 66 | t.Errorf("Expected webhook name to be %s, got %s", WebhookName, webhook.Name()) 67 | } 68 | } 69 | 70 | func TestGetURI(t *testing.T) { 71 | webhook := NewWebhook() 72 | uri := webhook.GetURI() 73 | if uri[0] != '/' { 74 | t.Errorf("Expected URI to start with '/', got %s", uri) 75 | } 76 | if uri != "/hostedcontrolplane-validation" { 77 | t.Errorf("Expected URI to be /hostedcontrolplane-validation, got %s", uri) 78 | } 79 | } 80 | 81 | func TestRules(t *testing.T) { 82 | webhook := NewWebhook() 83 | rules := webhook.Rules() 84 | if len(rules) == 0 { 85 | t.Fatal("Expected at least one rule") 86 | } 87 | } 88 | 89 | func TestDoc(t *testing.T) { 90 | webhook := NewWebhook() 91 | doc := webhook.Doc() 92 | if doc == "" { 93 | t.Error("Expected non-empty documentation string") 94 | } 95 | } 96 | 97 | func TestTimeoutSeconds(t *testing.T) { 98 | webhook := NewWebhook() 99 | timeout := webhook.TimeoutSeconds() 100 | if timeout != 2 { 101 | t.Errorf("Expected timeout to be 2, got %d", timeout) 102 | } 103 | } 104 | 105 | func TestValidate(t *testing.T) { 106 | tests := []struct { 107 | name string 108 | request admissionctl.Request 109 | expected bool 110 | }{ 111 | { 112 | name: "Valid request", 113 | request: admissionctl.Request{ 114 | AdmissionRequest: admissionv1.AdmissionRequest{ 115 | UserInfo: authenticationv1.UserInfo{ 116 | Username: "test-user", 117 | }, 118 | Kind: metav1.GroupVersionKind{ 119 | Group: "hypershift.openshift.io", 120 | Kind: "HostedControlPlane", 121 | }, 122 | }, 123 | }, 124 | expected: true, 125 | }, 126 | { 127 | name: "Invalid request without username", 128 | request: admissionctl.Request{ 129 | AdmissionRequest: admissionv1.AdmissionRequest{ 130 | UserInfo: authenticationv1.UserInfo{ 131 | Username: "", 132 | }, 133 | Kind: 
metav1.GroupVersionKind{ 134 | Group: "hypershift.openshift.io", 135 | Kind: "HostedControlPlane", 136 | }, 137 | }, 138 | }, 139 | expected: false, 140 | }, 141 | { 142 | name: "Invalid request with wrong kind", 143 | request: admissionctl.Request{ 144 | AdmissionRequest: admissionv1.AdmissionRequest{ 145 | UserInfo: authenticationv1.UserInfo{ 146 | Username: "test-user", 147 | }, 148 | Kind: metav1.GroupVersionKind{ 149 | Group: "hypershift.openshift.io", 150 | Kind: "Pod", 151 | }, 152 | }, 153 | }, 154 | expected: false, 155 | }, 156 | { 157 | name: "Invalid request with wrong group", 158 | request: admissionctl.Request{ 159 | AdmissionRequest: admissionv1.AdmissionRequest{ 160 | UserInfo: authenticationv1.UserInfo{ 161 | Username: "test-user", 162 | }, 163 | Kind: metav1.GroupVersionKind{ 164 | Group: "apps", 165 | Kind: "HostedControlPlane", 166 | }, 167 | }, 168 | }, 169 | expected: false, 170 | }, 171 | } 172 | 173 | for _, test := range tests { 174 | t.Run(test.name, func(t *testing.T) { 175 | webhook := NewWebhook() 176 | result := webhook.Validate(test.request) 177 | if result != test.expected { 178 | t.Errorf("Expected %v, got %v", test.expected, result) 179 | } 180 | }) 181 | } 182 | } 183 | -------------------------------------------------------------------------------- /pkg/webhooks/ingressconfig/ingressconfig.go: -------------------------------------------------------------------------------- 1 | package ingressconfig 2 | 3 | import ( 4 | "os" 5 | "regexp" 6 | "sync" 7 | 8 | admissionv1 "k8s.io/api/admission/v1" 9 | admissionregv1 "k8s.io/api/admissionregistration/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | logf "sigs.k8s.io/controller-runtime/pkg/log" 14 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | 16 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 17 | ) 18 | 19 | const ( 20 | WebhookName string = 
"ingress-config-validation" 21 | privilegedUsers string = `system:admin` 22 | docString string = `Managed OpenShift customers may not modify ingress config resources because it can can degrade cluster operators and can interfere with OpenShift SRE monitoring.` 23 | ) 24 | 25 | var ( 26 | log = logf.Log.WithName(WebhookName) 27 | privilegedServiceAccountsRe = regexp.MustCompile(utils.PrivilegedServiceAccountGroups) 28 | privilegedUsersRe = regexp.MustCompile(privilegedUsers) 29 | 30 | scope = admissionregv1.ClusterScope 31 | rules = []admissionregv1.RuleWithOperations{ 32 | { 33 | Operations: []admissionregv1.OperationType{"CREATE", "UPDATE", "DELETE"}, 34 | Rule: admissionregv1.Rule{ 35 | APIGroups: []string{"config.openshift.io"}, 36 | APIVersions: []string{"*"}, 37 | Resources: []string{"ingresses"}, 38 | Scope: &scope, 39 | }, 40 | }, 41 | } 42 | ) 43 | 44 | type IngressConfigWebhook struct { 45 | mu sync.Mutex 46 | s runtime.Scheme 47 | } 48 | 49 | // Authorized will determine if the request is allowed 50 | func (w *IngressConfigWebhook) Authorized(request admissionctl.Request) (ret admissionctl.Response) { 51 | ret = admissionctl.Denied("Only privileged service accounts may access") 52 | ret.UID = request.AdmissionRequest.UID 53 | 54 | // allow if modified by an allowlist-ed service account 55 | for _, group := range request.UserInfo.Groups { 56 | if privilegedServiceAccountsRe.Match([]byte(group)) { 57 | ret = admissionctl.Allowed("Privileged service accounts may access") 58 | ret.UID = request.AdmissionRequest.UID 59 | } 60 | } 61 | 62 | // allow if modified by an allowliste-ed user 63 | if privilegedUsersRe.Match([]byte(request.UserInfo.Username)) { 64 | ret = admissionctl.Allowed("Privileged service accounts may access") 65 | ret.UID = request.AdmissionRequest.UID 66 | } 67 | 68 | return 69 | } 70 | 71 | // GetURI returns the URI for the webhook 72 | func (w *IngressConfigWebhook) GetURI() string { return "/ingressconfig-validation" } 73 | 74 | // Validate 
will validate the incoming request 75 | func (w *IngressConfigWebhook) Validate(req admissionctl.Request) bool { 76 | valid := true 77 | valid = valid && (req.UserInfo.Username != "") 78 | valid = valid && (req.Kind.Kind == "Ingress") 79 | 80 | return valid 81 | } 82 | 83 | // Name is the name of the webhook 84 | func (w *IngressConfigWebhook) Name() string { return WebhookName } 85 | 86 | // FailurePolicy is how the hook config should react if k8s can't access it 87 | func (w *IngressConfigWebhook) FailurePolicy() admissionregv1.FailurePolicyType { 88 | return admissionregv1.Ignore 89 | } 90 | 91 | // MatchPolicy mirrors validatingwebhookconfiguration.webhooks[].matchPolicy 92 | // If it is important to the webhook, be sure to check subResource vs 93 | // requestSubResource. 94 | func (w *IngressConfigWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 95 | return admissionregv1.Equivalent 96 | } 97 | 98 | // Rules is a slice of rules on which this hook should trigger 99 | func (w *IngressConfigWebhook) Rules() []admissionregv1.RuleWithOperations { return rules } 100 | 101 | // ObjectSelector uses a *metav1.LabelSelector to augment the webhook's 102 | // Rules() to match only on incoming requests which match the specific 103 | // LabelSelector. 104 | func (w *IngressConfigWebhook) ObjectSelector() *metav1.LabelSelector { return nil } 105 | 106 | // SideEffects are what side effects, if any, this hook has. Refer to 107 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#side-effects 108 | func (w *IngressConfigWebhook) SideEffects() admissionregv1.SideEffectClass { 109 | return admissionregv1.SideEffectClassNone 110 | } 111 | 112 | // TimeoutSeconds returns an int32 representing how long to wait for this hook to complete 113 | func (w *IngressConfigWebhook) TimeoutSeconds() int32 { return 2 } 114 | 115 | // Doc returns a string for end-customer documentation purposes. 
116 | func (w *IngressConfigWebhook) Doc() string { return docString } 117 | 118 | // SyncSetLabelSelector returns the label selector to use in the SyncSet. 119 | // Return utils.DefaultLabelSelector() to stick with the default 120 | func (w *IngressConfigWebhook) SyncSetLabelSelector() metav1.LabelSelector { 121 | return utils.DefaultLabelSelector() 122 | } 123 | 124 | func (w *IngressConfigWebhook) ClassicEnabled() bool { return true } 125 | 126 | // HypershiftEnabled will return boolean value for hypershift enabled configurations 127 | func (w *IngressConfigWebhook) HypershiftEnabled() bool { return true } 128 | 129 | // NewWebhook creates a new webhook 130 | func NewWebhook() *IngressConfigWebhook { 131 | scheme := runtime.NewScheme() 132 | err := admissionv1.AddToScheme(scheme) 133 | if err != nil { 134 | log.Error(err, "Fail adding admissionsv1 scheme to IngressConfigWebhook") 135 | os.Exit(1) 136 | } 137 | 138 | err = corev1.AddToScheme(scheme) 139 | if err != nil { 140 | log.Error(err, "Fail adding corev1 scheme to IngressConfigWebhook") 141 | os.Exit(1) 142 | } 143 | 144 | return &IngressConfigWebhook{ 145 | s: *scheme, 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /pkg/webhooks/manifestworks/manifestworks.go: -------------------------------------------------------------------------------- 1 | package manifestworks 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "slices" 7 | "sync" 8 | 9 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 10 | admissionv1 "k8s.io/api/admission/v1" 11 | admissionregv1 "k8s.io/api/admissionregistration/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | logf "sigs.k8s.io/controller-runtime/pkg/log" 15 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 16 | ) 17 | 18 | const ( 19 | WebhookName string = "manifestworks-validation" 20 | docString string = "Validates ManifestWorks deletion 
operations are only performed by authorized service accounts"
)

var (
	// allowedServiceAccounts is the allow-list of identities permitted to
	// delete ManifestWork resources; any other requester is denied.
	allowedServiceAccounts = []string{
		"system:serviceaccount:ocm:ocm",
		"system:serviceaccount:kube-system:generic-garbage-collector",
		"system:serviceaccount:multicluster-engine:ocm-foundation-sa",
		"system:serviceaccount:multicluster-hub:grc-policy-addon-sa",
		"system:serviceaccount:multicluster-engine:managedcluster-import-controller-v2",
		"system:serviceaccount:kube-system:namespace-controller",
	}

	scope = admissionregv1.NamespacedScope
	// rules restricts this webhook to DELETE operations on namespaced
	// manifestworks in the work.open-cluster-management.io group (any version).
	rules = []admissionregv1.RuleWithOperations{
		{
			Operations: []admissionregv1.OperationType{"DELETE"},
			Rule: admissionregv1.Rule{
				APIGroups:   []string{"work.open-cluster-management.io"},
				APIVersions: []string{"*"},
				Resources:   []string{"manifestworks"},
				Scope:       &scope,
			},
		},
	}
	log = logf.Log.WithName(WebhookName)
)

// ManifestWorksWebhook validates ManifestWorks deletion operations
type ManifestWorksWebhook struct {
	// mu is not referenced by any method in this file.
	// NOTE(review): looks removable, but deleting it would also orphan the
	// sync import — confirm before cleaning up.
	mu sync.Mutex
	s  runtime.Scheme
}

// ObjectSelector implements Webhook interface; nil means no object-label filtering.
func (s *ManifestWorksWebhook) ObjectSelector() *metav1.LabelSelector { return nil }

// Doc returns the end-customer documentation string for this webhook.
func (s *ManifestWorksWebhook) Doc() string {
	// docString is a plain constant with no format verbs; returning it
	// directly replaces fmt.Sprintf(docString) (flagged by staticcheck S1039).
	return docString
}

// TimeoutSeconds implements Webhook interface
func (s *ManifestWorksWebhook) TimeoutSeconds() int32 { return 2 }

// MatchPolicy implements Webhook interface
func (s *ManifestWorksWebhook) MatchPolicy() admissionregv1.MatchPolicyType {
	return admissionregv1.Equivalent
}

// Name implements Webhook interface
func (s *ManifestWorksWebhook) Name() string { return WebhookName }

// FailurePolicy implements Webhook interface; Ignore means the API server
// allows the request if it cannot reach this webhook.
func (s *ManifestWorksWebhook) FailurePolicy() admissionregv1.FailurePolicyType {
	return admissionregv1.Ignore
}

// Rules implements Webhook interface
func (s *ManifestWorksWebhook) Rules() []admissionregv1.RuleWithOperations { return rules }

// GetURI implements Webhook interface
func (s *ManifestWorksWebhook) GetURI() string { return "/manifestworks-validation" }

// SideEffects implements Webhook interface
func (s *ManifestWorksWebhook) SideEffects() admissionregv1.SideEffectClass {
	return admissionregv1.SideEffectClassNone
}

// Validate - Make sure we're working with a well-formed Admission Request object:
// a non-empty username and a ManifestWork kind in the expected API group.
func (s *ManifestWorksWebhook) Validate(req admissionctl.Request) bool {
	valid := true
	valid = valid && (req.UserInfo.Username != "")
	valid = valid && (req.Kind.Kind == "ManifestWork")
	valid = valid && (req.Kind.Group == "work.open-cluster-management.io")

	return valid
}

// Authorized implements Webhook interface
func (s *ManifestWorksWebhook) Authorized(request admissionctl.Request) admissionctl.Response {
	return s.authorized(request)
}

// Is the request authorized? Allow-listed service accounts may always
// proceed; every other user is allowed unless the operation is DELETE.
func (s *ManifestWorksWebhook) authorized(request admissionctl.Request) admissionctl.Response {
	var ret admissionctl.Response

	// Check if the requesting user is in the list of allowed service accounts
	if slices.Contains(allowedServiceAccounts, request.UserInfo.Username) {
		ret = admissionctl.Allowed("Service account is authorized to delete ManifestWork resources")
		ret.UID = request.AdmissionRequest.UID
		return ret
	}

	// If not a delete operation, allow it (defensive: Rules() already
	// limits this webhook to DELETE operations).
	if request.Operation != admissionv1.Delete {
		ret = admissionctl.Allowed("Only DELETE operations are restricted")
		ret.UID = request.AdmissionRequest.UID
		return ret
	}

	// Deny all other requests
	log.Info("Unauthorized attempt to delete ManifestWork",
		"user", request.UserInfo.Username,
		"groups", request.UserInfo.Groups)

	ret = admissionctl.Denied(fmt.Sprintf("Only authorized service accounts can delete ManifestWork resources. Allowed service accounts: %v", allowedServiceAccounts))
	ret.UID = request.AdmissionRequest.UID
	return ret
}

// SyncSetLabelSelector returns the label selector to use in the SyncSet.
133 | func (s *ManifestWorksWebhook) SyncSetLabelSelector() metav1.LabelSelector { 134 | customLabelSelector := utils.DefaultLabelSelector() 135 | customLabelSelector.MatchExpressions = append(customLabelSelector.MatchExpressions, 136 | metav1.LabelSelectorRequirement{ 137 | Key: "ext-hypershift.openshift.io/cluster-type", 138 | Operator: metav1.LabelSelectorOpIn, 139 | Values: []string{"service-cluster"}, 140 | }) 141 | return customLabelSelector 142 | } 143 | 144 | func (s *ManifestWorksWebhook) ClassicEnabled() bool { return true } 145 | 146 | func (s *ManifestWorksWebhook) HypershiftEnabled() bool { return false } 147 | 148 | // NewWebhook creates a new webhook 149 | func NewWebhook() *ManifestWorksWebhook { 150 | scheme := runtime.NewScheme() 151 | err := admissionv1.AddToScheme(scheme) 152 | if err != nil { 153 | log.Error(err, "Fail adding admissionsv1 scheme to ManifestWorksWebhook") 154 | os.Exit(1) 155 | } 156 | return &ManifestWorksWebhook{ 157 | s: *scheme, 158 | } 159 | } 160 | -------------------------------------------------------------------------------- /pkg/webhooks/manifestworks/manifestworks_test.go: -------------------------------------------------------------------------------- 1 | package manifestworks 2 | 3 | import ( 4 | "testing" 5 | 6 | admissionv1 "k8s.io/api/admission/v1" 7 | authenticationv1 "k8s.io/api/authentication/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 10 | ) 11 | 12 | func TestManifestWorksAuthorized(t *testing.T) { 13 | tests := []struct { 14 | name string 15 | username string 16 | operation admissionv1.Operation 17 | shouldBeAllowed bool 18 | }{ 19 | { 20 | name: "OCM SA can delete manifestworks", 21 | username: "system:serviceaccount:ocm:ocm", 22 | operation: admissionv1.Delete, 23 | shouldBeAllowed: true, 24 | }, 25 | { 26 | name: "ocm-foundation-s SA can delete manifestworks", 27 | username: 
"system:serviceaccount:multicluster-engine:ocm-foundation-sa", 28 | operation: admissionv1.Delete, 29 | shouldBeAllowed: true, 30 | }, 31 | { 32 | name: "Random user cannot delete manifestworks", 33 | username: "unknown-user", 34 | operation: admissionv1.Delete, 35 | shouldBeAllowed: false, 36 | }, 37 | { 38 | name: "Non-DELETE operation should be allowed", 39 | username: "unknown-user", 40 | operation: admissionv1.Create, 41 | shouldBeAllowed: true, 42 | }, 43 | } 44 | 45 | for _, test := range tests { 46 | t.Run(test.name, func(t *testing.T) { 47 | webhook := NewWebhook() 48 | request := admissionctl.Request{ 49 | AdmissionRequest: admissionv1.AdmissionRequest{ 50 | UserInfo: authenticationv1.UserInfo{ 51 | Username: test.username, 52 | }, 53 | Operation: test.operation, 54 | Kind: metav1.GroupVersionKind{ 55 | Group: "work.open-cluster-management.io", 56 | Kind: "ManifestWork", 57 | }, 58 | }, 59 | } 60 | 61 | response := webhook.Authorized(request) 62 | if response.Allowed != test.shouldBeAllowed { 63 | t.Errorf("Unexpected response for %s. 
Got %v, expected %v", test.name, response.Allowed, test.shouldBeAllowed) 64 | } 65 | }) 66 | } 67 | } 68 | 69 | func TestName(t *testing.T) { 70 | webhook := NewWebhook() 71 | if webhook.Name() != WebhookName { 72 | t.Errorf("Expected webhook name to be %s, got %s", WebhookName, webhook.Name()) 73 | } 74 | } 75 | 76 | func TestGetURI(t *testing.T) { 77 | webhook := NewWebhook() 78 | uri := webhook.GetURI() 79 | if uri[0] != '/' { 80 | t.Errorf("Expected URI to start with '/', got %s", uri) 81 | } 82 | if uri != "/manifestworks-validation" { 83 | t.Errorf("Expected URI to be /manifestworks-validation, got %s", uri) 84 | } 85 | } 86 | 87 | func TestRules(t *testing.T) { 88 | webhook := NewWebhook() 89 | rules := webhook.Rules() 90 | if len(rules) == 0 { 91 | t.Fatal("Expected at least one rule") 92 | } 93 | } 94 | 95 | func TestDoc(t *testing.T) { 96 | webhook := NewWebhook() 97 | doc := webhook.Doc() 98 | if doc == "" { 99 | t.Error("Expected non-empty documentation string") 100 | } 101 | } 102 | 103 | func TestTimeoutSeconds(t *testing.T) { 104 | webhook := NewWebhook() 105 | timeout := webhook.TimeoutSeconds() 106 | if timeout != 2 { 107 | t.Errorf("Expected timeout to be 2, got %d", timeout) 108 | } 109 | } 110 | 111 | func TestValidate(t *testing.T) { 112 | tests := []struct { 113 | name string 114 | request admissionctl.Request 115 | expected bool 116 | }{ 117 | { 118 | name: "Valid request", 119 | request: admissionctl.Request{ 120 | AdmissionRequest: admissionv1.AdmissionRequest{ 121 | UserInfo: authenticationv1.UserInfo{ 122 | Username: "test-user", 123 | }, 124 | Kind: metav1.GroupVersionKind{ 125 | Group: "work.open-cluster-management.io", 126 | Kind: "ManifestWork", 127 | }, 128 | }, 129 | }, 130 | expected: true, 131 | }, 132 | { 133 | name: "Invalid request without username", 134 | request: admissionctl.Request{ 135 | AdmissionRequest: admissionv1.AdmissionRequest{ 136 | UserInfo: authenticationv1.UserInfo{ 137 | Username: "", 138 | }, 139 | Kind: 
metav1.GroupVersionKind{ 140 | Group: "work.open-cluster-management.io", 141 | Kind: "ManifestWork", 142 | }, 143 | }, 144 | }, 145 | expected: false, 146 | }, 147 | { 148 | name: "Invalid request with wrong kind", 149 | request: admissionctl.Request{ 150 | AdmissionRequest: admissionv1.AdmissionRequest{ 151 | UserInfo: authenticationv1.UserInfo{ 152 | Username: "test-user", 153 | }, 154 | Kind: metav1.GroupVersionKind{ 155 | Group: "work.open-cluster-management.io", 156 | Kind: "Pod", 157 | }, 158 | }, 159 | }, 160 | expected: false, 161 | }, 162 | { 163 | name: "Invalid request with wrong group", 164 | request: admissionctl.Request{ 165 | AdmissionRequest: admissionv1.AdmissionRequest{ 166 | UserInfo: authenticationv1.UserInfo{ 167 | Username: "test-user", 168 | }, 169 | Kind: metav1.GroupVersionKind{ 170 | Group: "apps", 171 | Kind: "ManifestWork", 172 | }, 173 | }, 174 | }, 175 | expected: false, 176 | }, 177 | } 178 | 179 | for _, test := range tests { 180 | t.Run(test.name, func(t *testing.T) { 181 | webhook := NewWebhook() 182 | result := webhook.Validate(test.request) 183 | if result != test.expected { 184 | t.Errorf("Expected %v, got %v", test.expected, result) 185 | } 186 | }) 187 | } 188 | } 189 | -------------------------------------------------------------------------------- /pkg/webhooks/register.go: -------------------------------------------------------------------------------- 1 | package webhooks 2 | 3 | import ( 4 | admissionregv1 "k8s.io/api/admissionregistration/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 7 | ) 8 | 9 | type RegisteredWebhooks map[string]WebhookFactory 10 | 11 | // Webhooks are all registered webhooks mapping name to hook 12 | var Webhooks = RegisteredWebhooks{} 13 | 14 | // Webhook interface 15 | type Webhook interface { 16 | // Authorized will determine if the request is allowed 17 | Authorized(request admissionctl.Request) admissionctl.Response 
18 | // GetURI returns the URI for the webhook 19 | GetURI() string 20 | // Validate will validate the incoming request 21 | Validate(admissionctl.Request) bool 22 | // Name is the name of the webhook 23 | Name() string 24 | // FailurePolicy is how the hook config should react if k8s can't access it 25 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#failure-policy 26 | FailurePolicy() admissionregv1.FailurePolicyType 27 | // MatchPolicy mirrors validatingwebhookconfiguration.webhooks[].matchPolicy. 28 | // If it is important to the webhook, be sure to check subResource vs 29 | // requestSubResource. 30 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-matchpolicy 31 | MatchPolicy() admissionregv1.MatchPolicyType 32 | // Rules is a slice of rules on which this hook should trigger 33 | Rules() []admissionregv1.RuleWithOperations 34 | // ObjectSelector uses a *metav1.LabelSelector to augment the webhook's 35 | // Rules() to match only on incoming requests which match the specific 36 | // LabelSelector. 37 | ObjectSelector() *metav1.LabelSelector 38 | // SideEffects are what side effects, if any, this hook has. Refer to 39 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#side-effects 40 | SideEffects() admissionregv1.SideEffectClass 41 | // TimeoutSeconds returns an int32 representing how long to wait for this hook to complete 42 | // The timeout value must be between 1 and 30 seconds. 43 | // https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#timeouts 44 | TimeoutSeconds() int32 45 | // Doc returns a string for end-customer documentation purposes. 46 | Doc() string 47 | // SyncSetLabelSelector returns the label selector to use in the SyncSet. 
48 | // Return utils.DefaultLabelSelector() to stick with the default 49 | SyncSetLabelSelector() metav1.LabelSelector 50 | // ClassicEnabled will return true if the webhook should be deployed to OSD/ROSA Classic clusters 51 | ClassicEnabled() bool 52 | // HypershiftEnabled will return true if the webhook should be deployed to ROSA HCP clusters 53 | HypershiftEnabled() bool 54 | } 55 | 56 | // WebhookFactory return a kind of Webhook 57 | type WebhookFactory func() Webhook 58 | 59 | // Register webhooks 60 | func Register(name string, input WebhookFactory) { 61 | Webhooks[name] = input 62 | } 63 | -------------------------------------------------------------------------------- /pkg/webhooks/scc/scc_test.go: -------------------------------------------------------------------------------- 1 | package scc 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | admissionv1 "k8s.io/api/admission/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | 10 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 11 | 12 | "k8s.io/apimachinery/pkg/runtime" 13 | ) 14 | 15 | type sccTestSuites struct { 16 | testID string 17 | targetSCC string 18 | username string 19 | operation admissionv1.Operation 20 | userGroups []string 21 | shouldBeAllowed bool 22 | } 23 | 24 | const testObjectRaw string = ` 25 | { 26 | "apiVersion": "security.openshift.io/v1", 27 | "kind": "SecurityContextConstraints", 28 | "metadata": { 29 | "name": "%s", 30 | "uid": "1234" 31 | } 32 | }` 33 | 34 | func createRawJSONString(name string) string { 35 | s := fmt.Sprintf(testObjectRaw, name) 36 | return s 37 | } 38 | 39 | func runSCCTests(t *testing.T, tests []sccTestSuites) { 40 | gvk := metav1.GroupVersionKind{ 41 | Group: "security.openshift.io", 42 | Version: "v1", 43 | Kind: "SecurityContextConstraints", 44 | } 45 | gvr := metav1.GroupVersionResource{ 46 | Group: "security.openshift.io", 47 | Version: "v1", 48 | Resource: "securitycontextcontraints", 49 | } 50 | 51 | for _, test := range 
tests { 52 | rawObjString := createRawJSONString(test.targetSCC) 53 | 54 | obj := runtime.RawExtension{ 55 | Raw: []byte(rawObjString), 56 | } 57 | 58 | oldObj := runtime.RawExtension{ 59 | Raw: []byte(rawObjString), 60 | } 61 | 62 | hook := NewWebhook() 63 | httprequest, err := testutils.CreateHTTPRequest(hook.GetURI(), 64 | test.testID, gvk, gvr, test.operation, test.username, test.userGroups, "", &obj, &oldObj) 65 | if err != nil { 66 | t.Fatalf("Expected no error, got %s", err.Error()) 67 | } 68 | 69 | response, err := testutils.SendHTTPRequest(httprequest, hook) 70 | if err != nil { 71 | t.Fatalf("Expected no error, got %s", err.Error()) 72 | } 73 | if response.UID == "" { 74 | t.Fatalf("No tracking UID associated with the response.") 75 | } 76 | 77 | if response.Allowed != test.shouldBeAllowed { 78 | t.Fatalf("Mismatch: %s (groups=%s) %s %s the scc. Test's expectation is that the user %s", test.username, test.userGroups, testutils.CanCanNot(response.Allowed), test.operation, testutils.CanCanNot(test.shouldBeAllowed)) 79 | } 80 | } 81 | } 82 | func TestUser(t *testing.T) { 83 | tests := []sccTestSuites{ 84 | { 85 | targetSCC: "hostnetwork", 86 | testID: "user-cant-delete-hostnetwork", 87 | username: "user1", 88 | operation: admissionv1.Delete, 89 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 90 | shouldBeAllowed: false, 91 | }, 92 | { 93 | targetSCC: "hostaccess", 94 | testID: "user-cant-delete-hostaccess", 95 | username: "user2", 96 | operation: admissionv1.Delete, 97 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 98 | shouldBeAllowed: false, 99 | }, 100 | { 101 | targetSCC: "anyuid", 102 | testID: "user-cant-delete-anyuid", 103 | username: "user3", 104 | operation: admissionv1.Delete, 105 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 106 | shouldBeAllowed: false, 107 | }, 108 | { 109 | targetSCC: "anyuid", 110 | testID: "user-cant-modify-hostnetwork", 111 | 
username: "user4", 112 | operation: admissionv1.Update, 113 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 114 | shouldBeAllowed: false, 115 | }, 116 | { 117 | targetSCC: "hostnetwork-v2", 118 | testID: "user-cant-delete-hostnetwork-v2", 119 | username: "user1", 120 | operation: admissionv1.Delete, 121 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 122 | shouldBeAllowed: false, 123 | }, 124 | { 125 | targetSCC: "testscc", 126 | testID: "user-can-modify-normal", 127 | username: "user1", 128 | operation: admissionv1.Update, 129 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 130 | shouldBeAllowed: true, 131 | }, 132 | { 133 | targetSCC: "hostaccess", 134 | testID: "allowed-user-can-modify-default", 135 | username: "system:serviceaccount:openshift-monitoring:cluster-monitoring-operator", 136 | operation: admissionv1.Update, 137 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 138 | shouldBeAllowed: true, 139 | }, 140 | { 141 | targetSCC: "hostaccess", 142 | testID: "allowed-system-admin-can-modify-default", 143 | username: "system:admin", 144 | operation: admissionv1.Update, 145 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 146 | shouldBeAllowed: true, 147 | }, 148 | { 149 | targetSCC: "testscc", 150 | testID: "user-can-delete-normal", 151 | username: "user1", 152 | operation: admissionv1.Delete, 153 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 154 | shouldBeAllowed: true, 155 | }, 156 | { 157 | targetSCC: "hostaccess", 158 | testID: "allowed-user-can-delete-default", 159 | username: "system:serviceaccount:openshift-monitoring:cluster-monitoring-operator", 160 | operation: admissionv1.Delete, 161 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 162 | shouldBeAllowed: true, 163 | }, 164 | { 165 | targetSCC: "privileged", 166 | testID: 
"osde2e-serviceaccounts-are-not-allowed", 167 | username: "system:serviceaccount:osde2e-abcde:osde2e-runner", 168 | operation: admissionv1.Update, 169 | userGroups: []string{"system:authenticated", "system:serviceaccounts:osde2e-abcde"}, 170 | shouldBeAllowed: false, 171 | }, 172 | { 173 | targetSCC: "anyuid", 174 | testID: "kube-apiserver-operator-allowed", 175 | username: "system:serviceaccount:openshift-kube-apiserver-operator:kube-apiserver-operator", 176 | operation: admissionv1.Update, 177 | userGroups: []string{}, 178 | shouldBeAllowed: true, 179 | }, 180 | } 181 | runSCCTests(t, tests) 182 | } 183 | -------------------------------------------------------------------------------- /pkg/webhooks/serviceaccount/serviceaccount_test.go: -------------------------------------------------------------------------------- 1 | package serviceaccount 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | admissionv1 "k8s.io/api/admission/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | 11 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 12 | ) 13 | 14 | type serviceAccountTestSuites struct { 15 | testID string 16 | targetSA string 17 | username string 18 | operation admissionv1.Operation 19 | userGroups []string 20 | namespace string 21 | shouldBeAllowed bool 22 | } 23 | 24 | const testObjectRaw string = ` 25 | { 26 | "apiVersion": "v1", 27 | "kind": "ServiceAccount", 28 | "metadata": { 29 | "name": "%s", 30 | "namespace": "%s", 31 | "uid": "1234" 32 | } 33 | }` 34 | 35 | func createRawJSONString(name, namespace string) string { 36 | s := fmt.Sprintf(testObjectRaw, name, namespace) 37 | return s 38 | } 39 | 40 | func runServiceAccountTests(t *testing.T, tests []serviceAccountTestSuites) { 41 | gvk := metav1.GroupVersionKind{ 42 | Group: "", 43 | Version: "v1", 44 | Kind: "ServiceAccount", 45 | } 46 | gvr := metav1.GroupVersionResource{ 47 | Group: "", 48 | Version: "v1", 49 | Resource: "serviceaccounts", 
50 | } 51 | 52 | for _, test := range tests { 53 | rawObjString := createRawJSONString(test.targetSA, test.namespace) 54 | 55 | obj := runtime.RawExtension{ 56 | Raw: []byte(rawObjString), 57 | } 58 | 59 | oldObj := runtime.RawExtension{ 60 | Raw: []byte(rawObjString), 61 | } 62 | 63 | hook := NewWebhook() 64 | httpRequest, err := testutils.CreateHTTPRequest(hook.GetURI(), 65 | test.testID, gvk, gvr, test.operation, test.username, test.userGroups, test.namespace, &obj, &oldObj) 66 | if err != nil { 67 | t.Fatalf("Expected no error, got %s", err.Error()) 68 | } 69 | 70 | response, err := testutils.SendHTTPRequest(httpRequest, hook) 71 | if err != nil { 72 | t.Fatalf("Expected no error, got %s", err.Error()) 73 | } 74 | if response.UID == "" { 75 | t.Fatalf("No tracking UID associated with the response.") 76 | } 77 | 78 | if response.Allowed != test.shouldBeAllowed { 79 | t.Fatalf("Mismatch: %s (groups=%s) %s %s the serviceaccount. Test's expectation is that the user %s", test.username, test.userGroups, testutils.CanCanNot(response.Allowed), test.operation, testutils.CanCanNot(test.shouldBeAllowed)) 80 | } 81 | } 82 | } 83 | func TestSADeletion(t *testing.T) { 84 | tests := []serviceAccountTestSuites{ 85 | { 86 | targetSA: "whatever", 87 | testID: "user-cant-delete-protected-sa-in-protected-ns", 88 | username: "user1", 89 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 90 | namespace: "openshift-ingress-operator", 91 | operation: admissionv1.Delete, 92 | shouldBeAllowed: false, 93 | }, 94 | { 95 | targetSA: "default", 96 | testID: "user-can-delete-normal-sa-in-protected-ns", 97 | username: "user1", 98 | operation: admissionv1.Delete, 99 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 100 | namespace: "openshift-ingress-operator", 101 | shouldBeAllowed: true, 102 | }, 103 | { 104 | targetSA: "whatever", 105 | testID: "user-can-delete-sa-in-normal-ns", 106 | username: "user1", 107 | operation: 
admissionv1.Delete, 108 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 109 | namespace: "whatever", 110 | shouldBeAllowed: true, 111 | }, 112 | { 113 | targetSA: "whatever", 114 | testID: "user-can-delete-sa-in-exception-ns", 115 | username: "user1", 116 | operation: admissionv1.Delete, 117 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 118 | namespace: "openshift-operators", 119 | shouldBeAllowed: true, 120 | }, 121 | { 122 | targetSA: "whatever", 123 | testID: "sre-can-delete-sa-in-protected-ns", 124 | username: "user1", 125 | operation: admissionv1.Delete, 126 | userGroups: []string{"system:serviceaccounts:openshift-backplane-srep"}, 127 | namespace: "openshift-ingress-operator", 128 | shouldBeAllowed: true, 129 | }, 130 | { 131 | targetSA: "whatever", 132 | testID: "elevated-sre-can-delete-sa-in-protected-ns", 133 | username: "backplane-cluster-admin", 134 | operation: admissionv1.Delete, 135 | userGroups: []string{"system:authenticated", "system:authenticated:oauth"}, 136 | namespace: "openshift-ingress-operator", 137 | shouldBeAllowed: true, 138 | }, 139 | { 140 | targetSA: "whatever", 141 | testID: "kube-account-can-delete-sa-in-protected-ns", 142 | username: "kube:admin", 143 | operation: admissionv1.Delete, 144 | userGroups: []string{"system:authenticated"}, 145 | namespace: "openshift-ingress-operator", 146 | shouldBeAllowed: true, 147 | }, 148 | } 149 | runServiceAccountTests(t, tests) 150 | } 151 | -------------------------------------------------------------------------------- /pkg/webhooks/techpreviewnoupgrade/techpreviewnoupgrade.go: -------------------------------------------------------------------------------- 1 | package techpreviewnoupgrade 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | 8 | configv1 "github.com/openshift/api/config/v1" 9 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/utils" 10 | admissionv1 "k8s.io/api/admission/v1" 11 | 
admissionregv1 "k8s.io/api/admissionregistration/v1" 12 | corev1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/runtime" 15 | logf "sigs.k8s.io/controller-runtime/pkg/log" 16 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 17 | ) 18 | 19 | const ( 20 | WebhookName string = "techpreviewnoupgrade-validation" 21 | docString string = `Managed OpenShift Customers may not use TechPreviewNoUpgrade FeatureGate that could prevent any future ability to do a y-stream upgrade to their clusters.` 22 | ) 23 | 24 | var ( 25 | log = logf.Log.WithName(WebhookName) 26 | 27 | scope = admissionregv1.ClusterScope 28 | rules = []admissionregv1.RuleWithOperations{ 29 | { 30 | Operations: []admissionregv1.OperationType{admissionregv1.Create, admissionregv1.Update}, 31 | Rule: admissionregv1.Rule{ 32 | APIGroups: []string{"config.openshift.io"}, 33 | APIVersions: []string{"*"}, 34 | Resources: []string{"featuregates"}, 35 | Scope: &scope, 36 | }, 37 | }, 38 | } 39 | ) 40 | 41 | type TechPreviewNoUpgradeWebhook struct { 42 | s runtime.Scheme 43 | } 44 | 45 | func (s *TechPreviewNoUpgradeWebhook) ObjectSelector() *metav1.LabelSelector { return nil } 46 | 47 | func (s *TechPreviewNoUpgradeWebhook) Doc() string { 48 | return fmt.Sprintf(docString) 49 | } 50 | 51 | func (s *TechPreviewNoUpgradeWebhook) TimeoutSeconds() int32 { return 1 } 52 | 53 | func (s *TechPreviewNoUpgradeWebhook) MatchPolicy() admissionregv1.MatchPolicyType { 54 | return admissionregv1.Equivalent 55 | } 56 | 57 | func (s *TechPreviewNoUpgradeWebhook) Name() string { return WebhookName } 58 | 59 | func (s *TechPreviewNoUpgradeWebhook) FailurePolicy() admissionregv1.FailurePolicyType { 60 | return admissionregv1.Ignore 61 | } 62 | 63 | func (s *TechPreviewNoUpgradeWebhook) Rules() []admissionregv1.RuleWithOperations { return rules } 64 | 65 | func (s *TechPreviewNoUpgradeWebhook) GetURI() string { return "/" + WebhookName } 66 | 67 | func (s 
*TechPreviewNoUpgradeWebhook) SideEffects() admissionregv1.SideEffectClass { 68 | return admissionregv1.SideEffectClassNone 69 | } 70 | 71 | func (s *TechPreviewNoUpgradeWebhook) Validate(req admissionctl.Request) bool { 72 | valid := true 73 | valid = valid && (req.UserInfo.Username != "") 74 | valid = valid && (req.Kind.Kind == "FeatureGate") 75 | 76 | return valid 77 | } 78 | 79 | func (s *TechPreviewNoUpgradeWebhook) Authorized(request admissionctl.Request) admissionctl.Response { 80 | return s.authorized(request) 81 | } 82 | 83 | func (s *TechPreviewNoUpgradeWebhook) SyncSetLabelSelector() metav1.LabelSelector { 84 | return utils.DefaultLabelSelector() 85 | } 86 | 87 | func (s *TechPreviewNoUpgradeWebhook) ClassicEnabled() bool { return true } 88 | 89 | func (s *TechPreviewNoUpgradeWebhook) HypershiftEnabled() bool { return true } 90 | 91 | func (s *TechPreviewNoUpgradeWebhook) renderFeatureGate(request admissionctl.Request) (*configv1.FeatureGate, error) { 92 | decoder := admissionctl.NewDecoder(&s.s) 93 | featureGate := &configv1.FeatureGate{} 94 | 95 | // Check the incoming featureGate for TechPreviewNoUpgrade 96 | err := decoder.DecodeRaw(request.Object, featureGate) 97 | if err != nil { 98 | return nil, err 99 | } 100 | 101 | return featureGate, nil 102 | } 103 | 104 | func (s *TechPreviewNoUpgradeWebhook) authorized(request admissionctl.Request) admissionctl.Response { 105 | var ret admissionctl.Response 106 | 107 | featureGate, err := s.renderFeatureGate(request) 108 | 109 | if err != nil { 110 | log.Error(err, "Couldn't render a FeatureGate from the incoming request") 111 | 112 | ret = admissionctl.Errored(http.StatusBadRequest, err) 113 | ret.UID = request.AdmissionRequest.UID 114 | 115 | return ret 116 | } 117 | 118 | if featureGate != nil && featureGate.Spec.FeatureSet == "TechPreviewNoUpgrade" { 119 | log.Info("Not allowing access because of TechPreviewNoUpgrade Feature Gate", "request", request.AdmissionRequest) 120 | 121 | ret = 
admissionctl.Denied("The TechPreviewNoUpgrade Feature Gate is not allowed") 122 | ret.UID = request.AdmissionRequest.UID 123 | 124 | return ret 125 | } 126 | 127 | log.Info("Allowing access", "request", request.AdmissionRequest) 128 | 129 | ret = admissionctl.Allowed("FeatureGate operation is allowed") 130 | ret.UID = request.AdmissionRequest.UID 131 | 132 | return ret 133 | } 134 | 135 | func NewWebhook() *TechPreviewNoUpgradeWebhook { 136 | scheme := runtime.NewScheme() 137 | 138 | err := admissionv1.AddToScheme(scheme) 139 | if err != nil { 140 | log.Error(err, "Fail adding admissionsv1 scheme to TechPreviewNoUpgradeWebhook") 141 | os.Exit(1) 142 | } 143 | err = corev1.AddToScheme(scheme) 144 | if err != nil { 145 | log.Error(err, "Fail adding corev1 scheme to TechPreviewNoUpgradeWebhook") 146 | os.Exit(1) 147 | } 148 | 149 | return &TechPreviewNoUpgradeWebhook{ 150 | s: *scheme, 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /pkg/webhooks/techpreviewnoupgrade/techpreviewnoupgrade_test.go: -------------------------------------------------------------------------------- 1 | package techpreviewnoupgrade_test 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | admissionv1 "k8s.io/api/admission/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | 11 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/testutils" 12 | "github.com/openshift/managed-cluster-validating-webhooks/pkg/webhooks/techpreviewnoupgrade" 13 | ) 14 | 15 | type techpreviewnoupgradeTestSuite struct { 16 | testName string 17 | testID string 18 | username string 19 | userGroups []string 20 | operation admissionv1.Operation 21 | featureSet string 22 | shouldBeAllowed bool 23 | } 24 | 25 | const testObjectRaw string = ` 26 | { 27 | "apiVersion": "config.openshift.io/v1", 28 | "kind": "FeatureGate", 29 | "metadata": { 30 | "name": "test-subject", 31 | "uid": "1234", 32 | "creationTimestamp": 
"2020-05-10T07:51:00Z", 33 | "labels": {} 34 | }, 35 | "spec": { 36 | "featureSet": "%s" 37 | } 38 | } 39 | ` 40 | 41 | func NewTestSuite(operation admissionv1.Operation, featureSet string) techpreviewnoupgradeTestSuite { 42 | return techpreviewnoupgradeTestSuite{ 43 | testID: "1234", 44 | operation: operation, 45 | featureSet: featureSet, 46 | shouldBeAllowed: true, 47 | } 48 | } 49 | 50 | func (s techpreviewnoupgradeTestSuite) ExpectNotAllowed() techpreviewnoupgradeTestSuite { 51 | s.shouldBeAllowed = false 52 | return s 53 | } 54 | 55 | func createObject(featureSet string) *runtime.RawExtension { 56 | return &runtime.RawExtension{ 57 | Raw: []byte(createRawJSONString(featureSet)), 58 | } 59 | } 60 | 61 | func createRawJSONString(featureSet string) string { 62 | s := fmt.Sprintf(testObjectRaw, featureSet) 63 | 64 | return s 65 | } 66 | 67 | func Test_AllowAnythingOtherThanTechPreviewNoUpgrade(t *testing.T) { 68 | testSuites := []techpreviewnoupgradeTestSuite{ 69 | NewTestSuite(admissionv1.Create, "AnythingOtherThanTechPreviewNoUpgrade"), 70 | NewTestSuite(admissionv1.Update, "AnythingOtherThanTechPreviewNoUpgrade"), 71 | } 72 | 73 | runTests(t, testSuites) 74 | } 75 | 76 | func Test_DoNotAllowTechPreviewNoUpgrade(t *testing.T) { 77 | testSuites := []techpreviewnoupgradeTestSuite{ 78 | NewTestSuite(admissionv1.Create, "TechPreviewNoUpgrade").ExpectNotAllowed(), 79 | NewTestSuite(admissionv1.Update, "TechPreviewNoUpgrade").ExpectNotAllowed(), 80 | } 81 | 82 | runTests(t, testSuites) 83 | } 84 | 85 | func runTests(t *testing.T, tests []techpreviewnoupgradeTestSuite) { 86 | for _, test := range tests { 87 | obj := runtime.RawExtension{ 88 | Raw: []byte(createRawJSONString(test.featureSet)), 89 | } 90 | 91 | hook := techpreviewnoupgrade.NewWebhook() 92 | httprequest, err := testutils.CreateHTTPRequest(hook.GetURI(), test.testID, metav1.GroupVersionKind{}, metav1.GroupVersionResource{}, test.operation, test.username, test.userGroups, "", &obj, nil) // we are only 
worried about the introduction of the featureSet 93 | 94 | if err != nil { 95 | t.Fatalf("Expected no error, got %s", err.Error()) 96 | } 97 | 98 | response, err := testutils.SendHTTPRequest(httprequest, hook) 99 | 100 | if err != nil { 101 | t.Fatalf("Expected no error, got %s", err.Error()) 102 | } 103 | 104 | if response.UID == "" { 105 | t.Fatalf("No tracking UID associated with the response: %+v", response) 106 | } 107 | 108 | if response.Allowed != test.shouldBeAllowed { 109 | 110 | t.Fatalf("Mismatch: %v %s %s. Test's expectation is that the user %s. Reason: %s, Message: %v", 111 | test, 112 | testutils.CanCanNot(response.Allowed), string(test.operation), 113 | testutils.CanCanNot(test.shouldBeAllowed), response.Result.Reason, response.Result.Message) 114 | } 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /pkg/webhooks/utils/utils.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "io" 7 | "net/http" 8 | "regexp" 9 | "slices" 10 | 11 | admissionv1 "k8s.io/api/admission/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | "k8s.io/apimachinery/pkg/runtime/serializer" 15 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 16 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 17 | ) 18 | 19 | const ( 20 | validContentType string = "application/json" 21 | // PrivilegedServiceAccountGroups is a regex string of serviceaccounts that our webhooks should commonly allow to 22 | // perform restricted actions. 
23 | // Centralized osde2e tests have a serviceaccount like "system:serviceaccounts:osde2e-abcde" 24 | // Decentralized osde2e tests have a serviceaccount like "system:serviceaccounts:osde2e-h-abcde" 25 | PrivilegedServiceAccountGroups string = `^system:serviceaccounts:(kube-.*|openshift|openshift-.*|default|redhat-.*|osde2e-(h-)?[a-z0-9]{5})` 26 | ) 27 | 28 | var ( 29 | admissionScheme = runtime.NewScheme() 30 | admissionCodecs = serializer.NewCodecFactory(admissionScheme) 31 | ) 32 | 33 | func RequestMatchesGroupKind(req admissionctl.Request, kind, group string) bool { 34 | return req.Kind.Kind == kind && req.Kind.Group == group 35 | } 36 | 37 | func DefaultLabelSelector() metav1.LabelSelector { 38 | return metav1.LabelSelector{ 39 | MatchLabels: map[string]string{ 40 | "api.openshift.com/managed": "true", 41 | }, 42 | } 43 | } 44 | 45 | func IsProtectedByResourceName(name string) bool { 46 | protectedNames := []string{ 47 | "alertmanagerconfigs.monitoring.coreos.com", 48 | "alertmanagers.monitoring.coreos.com", 49 | "prometheuses.monitoring.coreos.com", 50 | "thanosrulers.monitoring.coreos.com", 51 | "podmonitors.monitoring.coreos.com", 52 | "probes.monitoring.coreos.com", 53 | "prometheusrules.monitoring.coreos.com", 54 | "servicemonitors.monitoring.coreos.com", 55 | "prometheusagents.monitoring.coreos.com", 56 | "scrapeconfigs.monitoring.coreos.com", 57 | } 58 | return slices.Contains(protectedNames, name) 59 | } 60 | 61 | func RegexSliceContains(needle string, haystack []string) bool { 62 | for _, check := range haystack { 63 | checkRe := regexp.MustCompile(check) 64 | if checkRe.Match([]byte(needle)) { 65 | return true 66 | } 67 | } 68 | return false 69 | } 70 | 71 | func ParseHTTPRequest(r *http.Request) (admissionctl.Request, admissionctl.Response, error) { 72 | var resp admissionctl.Response 73 | var req admissionctl.Request 74 | var err error 75 | var body []byte 76 | if r.Body != nil { 77 | if body, err = io.ReadAll(r.Body); err != nil { 78 | resp = 
admissionctl.Errored(http.StatusBadRequest, err) 79 | return req, resp, err 80 | } 81 | } else { 82 | err := errors.New("request body is nil") 83 | resp = admissionctl.Errored(http.StatusBadRequest, err) 84 | return req, resp, err 85 | } 86 | if len(body) == 0 { 87 | err := errors.New("request body is empty") 88 | resp = admissionctl.Errored(http.StatusBadRequest, err) 89 | return req, resp, err 90 | } 91 | contentType := r.Header.Get("Content-Type") 92 | if contentType != validContentType { 93 | err := fmt.Errorf("contentType=%s, expected application/json", contentType) 94 | resp = admissionctl.Errored(http.StatusBadRequest, err) 95 | return req, resp, err 96 | } 97 | ar := admissionv1.AdmissionReview{} 98 | if _, _, err := admissionCodecs.UniversalDeserializer().Decode(body, nil, &ar); err != nil { 99 | resp = admissionctl.Errored(http.StatusBadRequest, err) 100 | return req, resp, err 101 | } 102 | 103 | // Copy for tracking 104 | if ar.Request == nil { 105 | err = fmt.Errorf("No request in request body") 106 | resp = admissionctl.Errored(http.StatusBadRequest, err) 107 | return req, resp, err 108 | } 109 | resp.UID = ar.Request.UID 110 | req = admissionctl.Request{ 111 | AdmissionRequest: *ar.Request, 112 | } 113 | return req, resp, nil 114 | } 115 | 116 | // WebhookResponse assembles an allowed or denied admission response with the same UID as the provided request. 
117 | // The reason for allowed admission responses is not shown to the end user and is commonly empty string: "" 118 | func WebhookResponse(request admissionctl.Request, allowed bool, reason string) admissionctl.Response { 119 | resp := admissionctl.ValidationResponse(allowed, reason) 120 | resp.UID = request.UID 121 | return resp 122 | } 123 | 124 | func init() { 125 | utilruntime.Must(admissionv1.AddToScheme(admissionScheme)) 126 | } 127 | -------------------------------------------------------------------------------- /pkg/webhooks/utils/utils_test.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "testing" 5 | 6 | admissionv1 "k8s.io/api/admission/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | admissionctl "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 9 | ) 10 | 11 | func TestRequestMatchesGroupKind(t *testing.T) { 12 | tests := []struct { 13 | name string 14 | req admissionctl.Request 15 | kind string 16 | group string 17 | expected bool 18 | }{ 19 | { 20 | name: "matches", 21 | req: admissionctl.Request{ 22 | AdmissionRequest: admissionv1.AdmissionRequest{ 23 | Kind: metav1.GroupVersionKind{ 24 | Kind: "testkind", 25 | Group: "testgroup", 26 | }, 27 | }, 28 | }, 29 | kind: "testkind", 30 | group: "testgroup", 31 | expected: true, 32 | }, 33 | { 34 | name: "doesn't match", 35 | req: admissionctl.Request{ 36 | AdmissionRequest: admissionv1.AdmissionRequest{ 37 | Kind: metav1.GroupVersionKind{ 38 | Kind: "testkind", 39 | Group: "testgroup", 40 | }, 41 | }, 42 | }, 43 | kind: "otherkind", 44 | group: "testgroup", 45 | expected: false, 46 | }, 47 | } 48 | 49 | for _, test := range tests { 50 | t.Run(test.name, func(t *testing.T) { 51 | actual := RequestMatchesGroupKind(test.req, test.kind, test.group) 52 | if test.expected != actual { 53 | t.Errorf("expected: %v, got %v", test.expected, actual) 54 | } 55 | }) 56 | } 57 | } 58 | 
-------------------------------------------------------------------------------- /test/e2e/Dockerfile: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | FROM registry.access.redhat.com/ubi9/go-toolset:1.24 as builder 3 | WORKDIR /opt/app-root/src 4 | COPY . . 5 | RUN CGO_ENABLED=0 GOFLAGS="-mod=mod" go test ./test/e2e -v -c --tags=osde2e -o e2e.test 6 | 7 | FROM registry.access.redhat.com/ubi8/ubi-minimal:latest 8 | COPY --from=builder /opt/app-root/src/e2e.test e2e.test 9 | 10 | LABEL com.redhat.component="managed-cluster-validating-webhooks-e2e-container" \ 11 | name="managed-cluster-validating-webhooks-e2e" \ 12 | version="1.0" \ 13 | release="1" \ 14 | summary="E2E tests for Managed Cluster Validating Webhooks" \ 15 | description="End-to-end tests for validating admission webhooks for OpenShift" \ 16 | io.k8s.description="End-to-end tests for validating admission webhooks for OpenShift" \ 17 | io.k8s.display-name="Managed Cluster Validating Webhooks E2E" \ 18 | io.openshift.tags="openshift,webhooks,validation,e2e,tests" 19 | 20 | ENTRYPOINT [ "/e2e.test" ] 21 | -------------------------------------------------------------------------------- /test/e2e/README.md: -------------------------------------------------------------------------------- 1 | ## Locally running e2e test suite 2 | When updating your operator it's beneficial to add e2e tests for new functionality AND ensure existing functionality is not breaking using e2e tests. 3 | To do this, the following steps are recommended 4 | 5 | 1. Run "make e2e-binary-build" to make sure e2e tests build 6 | 2. Deploy your new version of operator in a test cluster 7 | 3. Run "go install github.com/onsi/ginkgo/v2/ginkgo@latest" 8 | 4. Get kubeadmin credentials from your cluster using 9 | 10 | ocm get /api/clusters_mgmt/v1/clusters/(cluster-id)/credentials | jq -r .kubeconfig > /(path-to)/kubeconfig 11 | 12 | 5.
Run test suite using 13 | 14 | DISABLE_JUNIT_REPORT=true KUBECONFIG=/(path-to)/kubeconfig ./(path-to)/bin/ginkgo --tags=osde2e -v test/e2e 15 | -------------------------------------------------------------------------------- /test/e2e/e2e-template.yml: -------------------------------------------------------------------------------- 1 | # THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | apiVersion: template.openshift.io/v1 3 | kind: Template 4 | metadata: 5 | name: osde2e-focused-tests 6 | parameters: 7 | - name: OSDE2E_CONFIGS 8 | required: true 9 | - name: TEST_IMAGE 10 | required: true 11 | - name: OCM_CLIENT_ID 12 | required: false 13 | - name: OCM_CLIENT_SECRET 14 | required: false 15 | - name: OCM_CCS 16 | required: false 17 | - name: AWS_ACCESS_KEY_ID 18 | required: false 19 | - name: AWS_SECRET_ACCESS_KEY 20 | required: false 21 | - name: CLOUD_PROVIDER_REGION 22 | required: false 23 | - name: GCP_CREDS_JSON 24 | required: false 25 | - name: JOBID 26 | generate: expression 27 | from: "[0-9a-z]{7}" 28 | - name: IMAGE_TAG 29 | value: '' 30 | required: true 31 | - name: LOG_BUCKET 32 | value: 'osde2e-logs' 33 | - name: USE_EXISTING_CLUSTER 34 | value: 'TRUE' 35 | - name: CAD_PAGERDUTY_ROUTING_KEY 36 | required: false 37 | objects: 38 | - apiVersion: batch/v1 39 | kind: Job 40 | metadata: 41 | name: osde2e-validation-webhook-${IMAGE_TAG}-${JOBID} 42 | spec: 43 | backoffLimit: 0 44 | template: 45 | spec: 46 | restartPolicy: Never 47 | containers: 48 | - name: osde2e 49 | image: quay.io/redhat-services-prod/osde2e-cicada-tenant/osde2e:latest 50 | command: 51 | - /osde2e 52 | args: 53 | - test 54 | - --only-health-check-nodes 55 | - --skip-destroy-cluster 56 | - --skip-must-gather 57 | - --configs 58 | - ${OSDE2E_CONFIGS} 59 | securityContext: 60 | runAsNonRoot: true 61 | allowPrivilegeEscalation: false 62 | capabilities: 63 | drop: ["ALL"] 64 | seccompProfile: 65 | type: RuntimeDefault 66 | env: 67 | - name: AD_HOC_TEST_IMAGES 68 | value: 
${TEST_IMAGE}:${IMAGE_TAG} 69 | - name: OCM_CLIENT_ID 70 | value: ${OCM_CLIENT_ID} 71 | - name: OCM_CLIENT_SECRET 72 | value: ${OCM_CLIENT_SECRET} 73 | - name: OCM_CCS 74 | value: ${OCM_CCS} 75 | - name: AWS_ACCESS_KEY_ID 76 | value: ${AWS_ACCESS_KEY_ID} 77 | - name: AWS_SECRET_ACCESS_KEY 78 | value: ${AWS_SECRET_ACCESS_KEY} 79 | - name: CLOUD_PROVIDER_REGION 80 | value: ${CLOUD_PROVIDER_REGION} 81 | - name: GCP_CREDS_JSON 82 | value: ${GCP_CREDS_JSON} 83 | - name: LOG_BUCKET 84 | value: ${LOG_BUCKET} 85 | - name: USE_EXISTING_CLUSTER 86 | value: ${USE_EXISTING_CLUSTER} 87 | - name: CAD_PAGERDUTY_ROUTING_KEY 88 | value: ${CAD_PAGERDUTY_ROUTING_KEY} 89 | -------------------------------------------------------------------------------- /test/e2e/validation_webhook_runner_test.go: -------------------------------------------------------------------------------- 1 | // THIS FILE IS GENERATED BY BOILERPLATE. DO NOT EDIT. 2 | //go:build osde2e 3 | 4 | package osde2etests 5 | 6 | import ( 7 | "os" 8 | "path/filepath" 9 | "testing" 10 | 11 | . "github.com/onsi/ginkgo/v2" 12 | . "github.com/onsi/gomega" 13 | ) 14 | 15 | const ( 16 | testResultsDirectory = "/test-run-results" 17 | jUnitOutputFilename = "junit-validation-webhook.xml" 18 | ) 19 | 20 | // Test entrypoint. osde2e runs this as a test suite on test pod. 21 | func TestValidationWebhook(t *testing.T) { 22 | RegisterFailHandler(Fail) 23 | suiteConfig, reporterConfig := GinkgoConfiguration() 24 | if _, ok := os.LookupEnv("DISABLE_JUNIT_REPORT"); !ok { 25 | reporterConfig.JUnitReport = filepath.Join(testResultsDirectory, jUnitOutputFilename) 26 | } 27 | RunSpecs(t, "Validation Webhook", suiteConfig, reporterConfig) 28 | } 29 | --------------------------------------------------------------------------------