├── .dockerignore ├── .gitignore ├── Dockerfile ├── Makefile ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── groupversion_info.go │ ├── server_types.go │ ├── server_webhook.go │ ├── webhook_suite_test.go │ └── zz_generated.deepcopy.go ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── bases │ │ └── gameserver.martinheinz.dev_servers.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_servers.yaml │ │ └── webhook_in_servers.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── manager_config_patch.yaml │ ├── manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── kind │ └── kind-config.yaml ├── manager │ ├── controller_manager_config.yaml │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ ├── role_binding.yaml │ ├── server_editor_role.yaml │ └── server_viewer_role.yaml ├── samples │ ├── csgo.yaml │ ├── factorio.yaml │ ├── gameserver_v1alpha1_server.yaml │ ├── kustomization.yaml │ ├── minecraft.yaml │ ├── pvc.yaml │ └── rust.yaml └── scorecard │ ├── bases │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ ├── basic.config.yaml │ └── olm.config.yaml ├── controllers ├── server_controller.go ├── server_controller_test.go └── suite_test.go ├── go.mod ├── hack └── boilerplate.go.txt └── main.go /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore all files which are not go type 3 | !**/*.go 4 | !**/*.mod 5 | !**/*.sum 6 | shared-storage/* 7 | storage-backup/* 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Kubernetes Generated files - skip generated files, except for vendored files 18 | 19 | !vendor/**/zz_generated.* 20 | 21 | # editor and IDE paraphernalia 22 | .idea 23 | *.swp 24 | *.swo 25 | *~ 26 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.15 as builder 3 | 4 | WORKDIR /workspace 5 | # Copy the Go Modules manifests 6 | COPY go.mod go.mod 7 | COPY go.sum go.sum 8 | # cache deps before building and copying source so that we don't need to re-download as much 9 | # and so that source changes don't invalidate our downloaded layer 10 | RUN go mod download 11 | 12 | # Copy the go source 13 | COPY main.go main.go 14 | COPY api/ api/ 15 | COPY controllers/ controllers/ 16 | 17 | # Build 18 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go 19 | 20 | # Use distroless as minimal base image to package the manager binary 21 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 22 | FROM gcr.io/distroless/static:nonroot 23 | WORKDIR / 24 | COPY --from=builder /workspace/manager . 
25 | USER 65532:65532 26 | 27 | ENTRYPOINT ["/manager"] 28 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Current Operator version 2 | VERSION ?= 0.0.1 3 | # Default bundle image tag 4 | BUNDLE_IMG ?= controller-bundle:$(VERSION) 5 | # Options for 'bundle-build' 6 | ifneq ($(origin CHANNELS), undefined) 7 | BUNDLE_CHANNELS := --channels=$(CHANNELS) 8 | endif 9 | ifneq ($(origin DEFAULT_CHANNEL), undefined) 10 | BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) 11 | endif 12 | BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) 13 | 14 | # Image URL to use all building/pushing image targets 15 | IMG ?= controller:latest 16 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) 17 | CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" 18 | 19 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 20 | ifeq (,$(shell go env GOBIN)) 21 | GOBIN=$(shell go env GOPATH)/bin 22 | else 23 | GOBIN=$(shell go env GOBIN) 24 | endif 25 | 26 | all: manager 27 | 28 | # Run tests 29 | ENVTEST_ASSETS_DIR=$(shell pwd)/testbin 30 | test: generate fmt vet manifests 31 | mkdir -p ${ENVTEST_ASSETS_DIR} 32 | test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.7.0/hack/setup-envtest.sh 33 | source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... 
-coverprofile cover.out 34 | 35 | # Build manager binary 36 | manager: generate fmt vet 37 | go build -o bin/manager main.go 38 | 39 | # Run against the configured Kubernetes cluster in ~/.kube/config 40 | run: generate fmt vet manifests 41 | go run ./main.go 42 | 43 | # Install CRDs into a cluster 44 | install: manifests kustomize 45 | $(KUSTOMIZE) build config/crd | kubectl apply -f - 46 | 47 | # Uninstall CRDs from a cluster 48 | uninstall: manifests kustomize 49 | $(KUSTOMIZE) build config/crd | kubectl delete -f - 50 | 51 | # Deploy controller in the configured Kubernetes cluster in ~/.kube/config 52 | deploy: manifests kustomize 53 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 54 | $(KUSTOMIZE) build config/default | kubectl apply -f - 55 | 56 | # UnDeploy controller from the configured Kubernetes cluster in ~/.kube/config 57 | undeploy: 58 | $(KUSTOMIZE) build config/default | kubectl delete -f - 59 | 60 | # Generate manifests e.g. CRD, RBAC etc. 61 | manifests: controller-gen 62 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 63 | 64 | # Run go fmt against code 65 | fmt: 66 | go fmt ./... 67 | 68 | # Run go vet against code 69 | vet: 70 | go vet ./... 71 | 72 | # Generate code 73 | generate: controller-gen 74 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 75 | 76 | # Build the docker image 77 | docker-build: test 78 | docker build -t ${IMG} . 
79 | 80 | # Push the docker image 81 | docker-push: 82 | docker push ${IMG} 83 | 84 | # Download controller-gen locally if necessary 85 | CONTROLLER_GEN = $(shell pwd)/bin/controller-gen 86 | controller-gen: 87 | $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) 88 | 89 | # Download kustomize locally if necessary 90 | KUSTOMIZE = $(shell pwd)/bin/kustomize 91 | kustomize: 92 | $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) 93 | 94 | # go-get-tool will 'go get' any package $2 and install it to $1. 95 | PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) 96 | define go-get-tool 97 | @[ -f $(1) ] || { \ 98 | set -e ;\ 99 | TMP_DIR=$$(mktemp -d) ;\ 100 | cd $$TMP_DIR ;\ 101 | go mod init tmp ;\ 102 | echo "Downloading $(2)" ;\ 103 | GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ 104 | rm -rf $$TMP_DIR ;\ 105 | } 106 | endef 107 | 108 | # Generate bundle manifests and metadata, then validate generated files. 109 | .PHONY: bundle 110 | bundle: manifests kustomize 111 | operator-sdk generate kustomize manifests -q 112 | cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) 113 | $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) 114 | operator-sdk bundle validate ./bundle 115 | 116 | # Build the bundle image. 117 | .PHONY: bundle-build 118 | bundle-build: 119 | docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 
120 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: martinheinz.dev 2 | layout: go.kubebuilder.io/v3 3 | projectName: game-server-operator 4 | repo: github.com/MartinHeinz/game-server-operator 5 | resources: 6 | - crdVersion: v1 7 | group: gameserver 8 | kind: Server 9 | version: v1alpha1 10 | webhookVersion: v1 11 | version: 3-alpha 12 | plugins: 13 | manifests.sdk.operatorframework.io/v2: {} 14 | scorecard.sdk.operatorframework.io/v2: {} 15 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Game Server Operator 2 | 3 | This is repository for Kubernetes game-server-operator. This operator allows you to deploy popular game servers with single YAML (CRD). 4 | 5 | Currently supported game servers are: CS:GO, Rust, Minecraft and Factorio. Any containerized game server can be easily added. 
6 | 7 | The most minimalistic server configuration can be as simple as: 8 | 9 | ```yaml 10 | apiVersion: gameserver.martinheinz.dev/v1alpha1 11 | kind: Server 12 | metadata: 13 | name: csgo 14 | spec: 15 | gameName: "CSGO" 16 | config: 17 | from: 18 | - configMapRef: 19 | name: csgo 20 | - secretRef: 21 | name: csgo 22 | storage: 23 | size: 12Gi 24 | ``` 25 | 26 | For sample configurations for each game see [samples directory](./config/samples) 27 | 28 | For details on how to setup and connect to each game server see [Games section below](#games) 29 | 30 | ## Initial Setup 31 | 32 | ```shell 33 | operator-sdk init --domain martinheinz.dev --repo=github.com/MartinHeinz/game-server-operator --owner="Martin Heinz" --license=none 34 | operator-sdk create api --group gameserver --version v1alpha1 --kind Server 35 | operator-sdk create webhook --group gameserver --version v1alpha1 --kind Server --defaulting --programmatic-validation 36 | ``` 37 | 38 | # Deployment (on KinD) 39 | 40 | ```shell 41 | kind delete cluster --name operator 42 | kind create cluster --name operator --config config/kind/kind-config.yaml --image=kindest/node:v1.20.0 43 | kind --name operator export kubeconfig 44 | 45 | # Install cert-manager 46 | kubectl apply -f https://github.com/jetstack/cert-manager/releases/download/v1.1.0/cert-manager.yaml 47 | kubectl get pods --namespace cert-manager 48 | NAME READY STATUS RESTARTS AGE 49 | cert-manager-5597cff495-d8mmx 1/1 Running 0 34s 50 | cert-manager-cainjector-bd5f9c764-mssm2 1/1 Running 0 34s 51 | cert-manager-webhook-5f57f59fbc-m8j2j 1/1 Running 0 34s 52 | 53 | export USERNAME=martinheinz 54 | export IMAGE=docker.io/$USERNAME/game-server-operator:latest 55 | 56 | docker build -t $IMAGE . 
# ONLY FOR DEVELOPMENT 57 | docker push $IMAGE # ONLY FOR DEVELOPMENT 58 | 59 | make deploy IMG=$IMAGE 60 | kubectl get crd 61 | NAME CREATED AT 62 | servers.gameserver.martinheinz.dev 2021-01-06T13:03:10Z 63 | 64 | kubectl get pods -n game-server-operator-system 65 | NAME READY STATUS RESTARTS AGE 66 | game-server-operator-controller-manager-6c7758447b-qnlhq 2/2 Running 0 8s 67 | 68 | # Options: rust, csgo, minecraft, factorio 69 | export GAME_NAME=... 70 | # For game-specific notes, see _Games_ section below 71 | kubectl apply -f config/samples/${GAME_NAME}.yaml 72 | 73 | kubectl get server ${GAME_NAME} 74 | NAME STATUS STORAGE AGE 75 | GAME_NAME Active Bound 39h 76 | ``` 77 | 78 | # Games 79 | 80 | Before we can deploy individual servers, we first need to deploy the operator, for that see [Deployment section](#deployment-on-kind) 81 | 82 | ## CS:GO 83 | 84 | - Create server (modify file to override defaults): 85 | ```shell 86 | ~ $ kubectl apply -f config/samples/csgo.yaml 87 | ``` 88 | 89 | - Verify that the server is running: 90 | ```shell 91 | ~ $ kubectl get server csgo 92 | NAME STATUS STORAGE AGE 93 | csgo Active Bound 39h 94 | ``` 95 | 96 | - Connecting to server (if running on _KinD_): 97 | ```shell 98 | ~ $ docker inspect --format='{{.NetworkSettings.IPAddress}}' operator-control-plane 99 | 172.17.0.2 # Node IP 100 | ``` 101 | 102 | By default CS:GO uses port `27015`, which is exposed using NodePort at `30015`. 103 | 104 | - Start game and open console (using _tilde_ key) 105 | - Type: 106 | ``` 107 | rcon_address 172.17.0.2:30015 108 | rcon_password 109 | rcon status 110 | ``` 111 | 112 | This should output something like: 113 | 114 | ``` 115 | hostname: csgo.default.svc.cluster.local 116 | version : 1.37.7.5/13775 1215/8012 secure [G:1:3964911] 117 | udp/ip : 0.0.0.0:27015 (public ip: ...)
118 | os : Linux 119 | type : community dedicated 120 | map : de_dust2 121 | gotv[0]: port 27020, delay 30.0s, rate 64.0 122 | players : 0 humans, 1 bots (12/0 max) (hibernating) 123 | 124 | # userid name uniqueid connected ping loss state rate adr 125 | # 2 "GOTV" BOT active 64 126 | #end 127 | ``` 128 | 129 | Playing on server: 130 | 131 | - Open console in CS:GO (using _tilde_ key) 132 | - Type (assuming IP above and default config): 133 | ``` 134 | password # Omit if using password-less server 135 | connect 172.17.0.2:30015 136 | ``` 137 | 138 | Check server logs: 139 | 140 | ```shell 141 | L 01/09/2021 - 09:02:59: "USERNAME<3><>" connected, address "" 142 | Client "USERNAME" connected (10.128.0.340:18320). 143 | Server waking up from hibernation 144 | ``` 145 | 146 | ## Rust 147 | 148 | Create server (modify file to override defaults): 149 | ```shell 150 | ~ $ kubectl apply -f config/samples/rust.yaml 151 | ``` 152 | 153 | Verify that the server is running: 154 | ```shell 155 | ~ $ kubectl get server rust 156 | NAME STATUS STORAGE AGE 157 | rust Active Bound 39h 158 | 159 | ~ $ kubectl logs deploy/rust-deployment 160 | ... 161 | WebSocket RCon Started on 30016 162 | ...
163 | 164 | ~ $ kubectl exec deploy/rust-deployment -- rcon status 165 | RconApp::Relaying RCON command: status 166 | RconApp::Received message: hostname: My Awesome Server 167 | version : 2275 secure (secure mode enabled, connected to Steam3) 168 | map : Procedural Map 169 | players : 0 (500 max) (0 queued) (0 joining) 170 | 171 | id name ping connected addr owner violation kicks 172 | 173 | RconApp::Command relayed 174 | ``` 175 | 176 | Connecting to server (if running on _KinD_): 177 | ```shell 178 | ~ $ docker inspect --format='{{.NetworkSettings.IPAddress}}' operator-control-plane 179 | 172.17.0.2 # Node IP 180 | ``` 181 | 182 | By default ports are set to: 183 | - `30015` - user access 184 | - `30016` - RCON access 185 | - `30080` - RCON browser access 186 | 187 | ## Factorio 188 | 189 | Create server (modify file to override defaults): 190 | ```shell 191 | ~ $ kubectl apply -f config/samples/factorio.yaml 192 | ``` 193 | 194 | Verify that the server is running: 195 | ```shell 196 | ~ $ kubectl get server factorio 197 | NAME STATUS STORAGE AGE 198 | factorio Active Bound 80s 199 | ``` 200 | 201 | Testing RCON connection with Python: 202 | 203 | ```python 204 | # pip install factorio-rcon-py 205 | import factorio_rcon 206 | 207 | client = factorio_rcon.RCONClient("172.17.0.2", 30015, "") 208 | response = client.send_command("/help") 209 | ``` 210 | 211 | The above snippet should exit with `code 0` and in logs you should see something like: 212 | 213 | ``` 214 | 543.988 Info RemoteCommandProcessor.cpp:242: New RCON connection from IP ADDR:({...:54278}) 215 | ``` 216 | 217 | By default ports are set to: 218 | - `32197` - user access 219 | - `30015` - RCON access 220 | 221 | ## Minecraft 222 | 223 | Create server (modify file to override defaults): 224 | ```shell 225 | ~ $ kubectl apply -f config/samples/minecraft.yaml 226 | ``` 227 | 228 | Verify that the server is running: 229 | ```shell 230 | ~ $ kubectl get server minecraft 231 | NAME STATUS STORAGE AGE 232 | 
minecraft Active Bound 10m 233 | ``` 234 | 235 | Test RCON: 236 | ```shell 237 | ~ $ kubectl exec --stdin --tty deploy/minecraft-deployment -- /bin/bash 238 | bash-4.4# rcon-cli 239 | > /list 240 | There are 0 of a max of 20 players online: 241 | ... 242 | CTRL+D 243 | ``` 244 | 245 | Logs should show something like: 246 | 247 | ``` 248 | [16:47:28] [RCON Listener #1/INFO]: Thread RCON Client /0:0:0:0:0:0:0:1 started 249 | [16:48:06] [RCON Client /0:0:0:0:0:0:0:1 #3/INFO]: Thread RCON Client /0:0:0:0:0:0:0:1 shutting down 250 | ``` 251 | 252 | By default ports are set to: 253 | - `30565` - user access 254 | - `30575` - RCON access 255 | 256 | ## Changing Default Server Configuration 257 | 258 | | Parameter | Description | 259 | |-------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------| 260 | | gameName | Name of the game. One of the `CSGO`, `Rust`, `Factorio`, `Minecraft` | 261 | | config.from.configMapRef.name | Name of configMap used for configuration | 262 | | config.from.secretRef.name | Name of Secret used for configuration of sensitive information | 263 | | config.from.mountAs | Specifies whether to mount configuration as environment variables or as files in PVC (Optional, Defaults to `Env`, Options: `Env`, `File` ) | 264 | | config.from.mountPath | Specifies mount path when using config from files, only needed when `config.from.mountAs` is set to `File` | 265 | | port.targetPort | Port that container is listening on (Optional) | 266 | | port.port | Port exposed by generated Service (Optional) | 267 | | port.nodePort | Port that will be publicly exposed on cluster node (Optional) | 268 | | storage.size | Size of PVC, e.g. 
`12Gi` | 269 | | resources.requests | Minimum resources allocated for server container (Optional, Defaults to `memory: 64Mi`, `cpu: 128m`) | 270 | | resources.limits | Minimum resources available for server container (Optional, Defaults to `memory: 1`, `cpu: 1Gi`) | 271 | 272 | 273 | Complete configuration example: 274 | 275 | ```yaml 276 | apiVersion: gameserver.martinheinz.dev/v1alpha1 277 | kind: Server 278 | metadata: 279 | name: csgo 280 | spec: 281 | gameName: "CSGO" 282 | port: # These ports are defaults, can be omitted 283 | - port: 27015 284 | targetPort: 27015 285 | nodePort: 30015 286 | config: 287 | from: 288 | - configMapRef: 289 | name: csgo 290 | - secretRef: 291 | name: csgo 292 | mountAs: Env 293 | storage: 294 | size: 12Gi 295 | resources: 296 | requests: 297 | memory: "64Mi" 298 | cpu: "250m" 299 | limits: 300 | memory: "1Gi" 301 | cpu: "2" 302 | ``` 303 | 304 | To see what environment variables can be used in configMap and Secret for each game see: 305 | 306 | - [Rust configuration options](https://github.com/Didstopia/rust-server#how-to-run-the-server) 307 | - [CS:GO configuration options](https://github.com/kaimallea/csgo#environment-variable-overrides) 308 | - [Minecraft configuration options](https://github.com/itzg/docker-minecraft-server/blob/master/README.md) 309 | - [Factorio configuration options](https://github.com/factoriotools/factorio-docker#volumes) 310 | 311 | ## Contributing 312 | 313 | If you'd like to use this operator to deploy some other game server, then you can open an issue or contribute it yourself. That would include: 314 | 315 | - Providing a Docker image for the game server 316 | - Adding template (e.g. 
_Deployment_, _Service_ and _PVC_) for game in [server_controller.go](controllers/server_controller.go) (see `Games = map[gameserverv1alpha1.GameName]GameSetting{...}` near the end of file) 317 | - Adding game name to list of possible server name values in [server_types.go](api/v1alpha1/server_types.go) (see `type GameName string`) -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Martin Heinz. 3 | */ 4 | 5 | // Package v1alpha1 contains API Schema definitions for the gameserver v1alpha1 API group 6 | // +kubebuilder:object:generate=true 7 | // +groupName=gameserver.martinheinz.dev 8 | package v1alpha1 9 | 10 | import ( 11 | "k8s.io/apimachinery/pkg/runtime/schema" 12 | "sigs.k8s.io/controller-runtime/pkg/scheme" 13 | ) 14 | 15 | var ( 16 | // GroupVersion is group version used to register these objects 17 | GroupVersion = schema.GroupVersion{Group: "gameserver.martinheinz.dev", Version: "v1alpha1"} 18 | 19 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 20 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 21 | 22 | // AddToScheme adds the types in this group-version to the given scheme. 23 | AddToScheme = SchemeBuilder.AddToScheme 24 | ) 25 | -------------------------------------------------------------------------------- /api/v1alpha1/server_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Martin Heinz. 3 | */ 4 | 5 | package v1alpha1 6 | 7 | import ( 8 | appsv1 "k8s.io/api/apps/v1" 9 | corev1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/util/intstr" 12 | ) 13 | 14 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 15 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
16 | 17 | // ServerSpec defines the desired state of Server 18 | type ServerSpec struct { 19 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 20 | // Important: Run "make manifests" to regenerate code after modifying this file 21 | 22 | GameName GameName `json:"gameName"` 23 | 24 | // +listType=atomic 25 | // +optional 26 | Ports []corev1.ServicePort `json:"port,omitempty"` 27 | 28 | // +optional 29 | Config Config `json:"config,omitempty"` 30 | 31 | Storage *ServerStorage `json:"storage,omitempty"` 32 | 33 | // +optional 34 | ResourceRequirements *corev1.ResourceRequirements `json:"resources,omitempty"` 35 | } 36 | 37 | type Config struct { 38 | // +listType=atomic 39 | From []corev1.EnvFromSource `json:"from,omitempty"` 40 | // +optional 41 | // +kubebuilder:default:=Env 42 | MountAs MountType `json:"mountAs,omitempty"` 43 | // +optional 44 | MountPath string `json:"mountPath,omitempty"` 45 | } 46 | 47 | // +kubebuilder:validation:Enum=File;Env 48 | type MountType string 49 | 50 | const ( 51 | File MountType = "File" 52 | Env MountType = "Env" 53 | ) 54 | 55 | // +kubebuilder:validation:Enum=CSGO;Factorio;Rust;Minecraft 56 | type GameName string 57 | 58 | const ( 59 | CSGO GameName = "CSGO" 60 | Factorio GameName = "Factorio" 61 | Rust GameName = "Rust" 62 | Minecraft GameName = "Minecraft" 63 | ) 64 | 65 | // ServerStorage ... 
66 | // +k8s:openapi-gen=false 67 | type ServerStorage struct { 68 | // +kubebuilder:validation:Pattern=^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ 69 | Size string `json:"size,omitempty"` 70 | // +optional 71 | //Name string `json:"name"` 72 | } 73 | 74 | // +kubebuilder:validation:Enum=Bound;Pending 75 | type StorageStatus string 76 | 77 | const ( 78 | Bound StorageStatus = "Bound" 79 | Pending StorageStatus = "Pending" 80 | ) 81 | 82 | // +kubebuilder:validation:Enum=Active;Inactive 83 | type Status string 84 | 85 | const ( 86 | Active Status = "Active" 87 | Inactive Status = "Inactive" 88 | ) 89 | 90 | // ServerStatus defines the observed state of Server 91 | type ServerStatus struct { 92 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 93 | // Important: Run "make manifests" to regenerate code after modifying this file 94 | 95 | // Describes whether there are running pods for this server 96 | // +optional 97 | Status Status `json:"status,omitempty"` 98 | // Describes whether storage for server is ready 99 | // +optional 100 | Storage StorageStatus `json:"storage,omitempty"` 101 | // List of available server NodePorts 102 | // +optional 103 | Ports []int32 `json:"ports,omitempty"` 104 | } 105 | 106 | // +kubebuilder:object:root=true 107 | // +kubebuilder:subresource:status 108 | 109 | // Server is the Schema for the servers API 110 | // +k8s:openapi-gen=true 111 | // +kubebuilder:resource:path=servers,scope=Namespaced 112 | // +kubebuilder:subresource:status 113 | // +kubebuilder:printcolumn:name="status",type="string",JSONPath=".status.status",priority=0,description="Status of deployment" 114 | // +kubebuilder:printcolumn:name="Storage",type="string",JSONPath=".status.storage",priority=0,description="Status of the reconcile condition" 115 | // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",priority=0,description="Age of the resource" 116 | // 
+kubebuilder:printcolumn:name="Ports",type="string",JSONPath=".status.ports",priority=10,description="List of available server NodePorts" 117 | type Server struct { 118 | metav1.TypeMeta `json:",inline"` 119 | metav1.ObjectMeta `json:"metadata,omitempty"` 120 | 121 | Spec ServerSpec `json:"spec,omitempty"` 122 | Status ServerStatus `json:"status,omitempty"` 123 | } 124 | 125 | // +kubebuilder:object:root=true 126 | 127 | // ServerList contains a list of Server 128 | type ServerList struct { 129 | metav1.TypeMeta `json:",inline"` 130 | metav1.ListMeta `json:"metadata,omitempty"` 131 | Items []Server `json:"items"` 132 | } 133 | 134 | func init() { 135 | SchemeBuilder.Register(&Server{}, &ServerList{}) 136 | } 137 | 138 | type GameSetting struct { 139 | Deployment appsv1.Deployment 140 | Service corev1.Service 141 | PersistentVolumeClaim corev1.PersistentVolumeClaim 142 | } 143 | 144 | var ( 145 | Games = map[GameName]GameSetting{ 146 | CSGO: {Deployment: appsv1.Deployment{ 147 | Spec: appsv1.DeploymentSpec{ 148 | Replicas: func(val int32) *int32 { return &val }(1), 149 | Template: corev1.PodTemplateSpec{ 150 | ObjectMeta: metav1.ObjectMeta{}, 151 | Spec: corev1.PodSpec{ 152 | Volumes: []corev1.Volume{{ 153 | Name: "csgo-data", 154 | VolumeSource: corev1.VolumeSource{ 155 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 156 | ClaimName: "", // This gets set to server name (m.Name) 157 | }, 158 | }, 159 | }}, 160 | Containers: []corev1.Container{{ 161 | Name: "csgo", 162 | Image: "kmallea/csgo:latest", 163 | Ports: []corev1.ContainerPort{ 164 | {ContainerPort: 27015, Protocol: corev1.ProtocolTCP}, 165 | {ContainerPort: 27015, Protocol: corev1.ProtocolUDP}, 166 | }, 167 | VolumeMounts: []corev1.VolumeMount{ 168 | {Name: "csgo-data", MountPath: "/home/steam/csgo"}, 169 | }, 170 | ImagePullPolicy: corev1.PullIfNotPresent, 171 | }}, 172 | }, 173 | }, 174 | }, 175 | }, 176 | Service: corev1.Service{ 177 | ObjectMeta: metav1.ObjectMeta{ 178 | Name: 
"csgo", 179 | }, 180 | Spec: corev1.ServiceSpec{ 181 | Ports: []corev1.ServicePort{ 182 | {Name: "27015-tcp", Port: 27015, NodePort: 30015, TargetPort: intstr.IntOrString{Type: 0, IntVal: 27015, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 183 | {Name: "27015-udp", Port: 27015, NodePort: 30015, TargetPort: intstr.IntOrString{Type: 0, IntVal: 27015, StrVal: ""}, Protocol: corev1.ProtocolUDP}, 184 | }, 185 | Type: corev1.ServiceTypeNodePort, 186 | }, 187 | }, 188 | PersistentVolumeClaim: corev1.PersistentVolumeClaim{ 189 | ObjectMeta: metav1.ObjectMeta{ 190 | Name: "csgo", 191 | }, 192 | Spec: corev1.PersistentVolumeClaimSpec{ 193 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 194 | }, 195 | }, 196 | }, 197 | Factorio: {Deployment: appsv1.Deployment{ 198 | Spec: appsv1.DeploymentSpec{ 199 | Replicas: func(val int32) *int32 { return &val }(1), 200 | Template: corev1.PodTemplateSpec{ 201 | ObjectMeta: metav1.ObjectMeta{}, 202 | Spec: corev1.PodSpec{ 203 | Volumes: []corev1.Volume{{ 204 | Name: "factorio-data", 205 | VolumeSource: corev1.VolumeSource{ 206 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 207 | ClaimName: "", // This gets set to server name (m.Name) 208 | }, 209 | }, 210 | }}, 211 | Containers: []corev1.Container{{ 212 | Name: "factorio", 213 | Image: "factoriotools/factorio:latest", 214 | Ports: []corev1.ContainerPort{ 215 | {ContainerPort: 27015, Protocol: corev1.ProtocolTCP}, 216 | {ContainerPort: 34197, Protocol: corev1.ProtocolUDP}, 217 | }, 218 | VolumeMounts: []corev1.VolumeMount{ 219 | {Name: "factorio-data", MountPath: "/factorio"}, 220 | }, 221 | ImagePullPolicy: corev1.PullIfNotPresent, 222 | }}, 223 | SecurityContext: &corev1.PodSecurityContext{ 224 | RunAsUser: func(val int64) *int64 { return &val }(845), 225 | RunAsGroup: func(val int64) *int64 { return &val }(845), 226 | FSGroup: func(val int64) *int64 { return &val }(845), 227 | }, 228 | }, 229 | }, 230 | Strategy: appsv1.DeploymentStrategy{ 231 
| Type: appsv1.RecreateDeploymentStrategyType, // Pod keep crashing with rolling update if one instance already exists 232 | }, 233 | }, 234 | }, 235 | Service: corev1.Service{ 236 | ObjectMeta: metav1.ObjectMeta{ 237 | Name: "factorio", 238 | }, 239 | Spec: corev1.ServiceSpec{ 240 | Ports: []corev1.ServicePort{ 241 | {Name: "27015-tcp", Port: 27015, NodePort: 30015, TargetPort: intstr.IntOrString{Type: 0, IntVal: 27015, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 242 | {Name: "34197-udp", Port: 34197, NodePort: 32197, TargetPort: intstr.IntOrString{Type: 0, IntVal: 34197, StrVal: ""}, Protocol: corev1.ProtocolUDP}, 243 | }, 244 | Type: corev1.ServiceTypeNodePort, 245 | }, 246 | }, 247 | PersistentVolumeClaim: corev1.PersistentVolumeClaim{ 248 | ObjectMeta: metav1.ObjectMeta{ 249 | Name: "factorio", 250 | }, 251 | Spec: corev1.PersistentVolumeClaimSpec{ 252 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 253 | }, 254 | }, 255 | }, 256 | Rust: {Deployment: appsv1.Deployment{ 257 | Spec: appsv1.DeploymentSpec{ 258 | Replicas: func(val int32) *int32 { return &val }(1), 259 | Template: corev1.PodTemplateSpec{ 260 | ObjectMeta: metav1.ObjectMeta{}, 261 | Spec: corev1.PodSpec{ 262 | Volumes: []corev1.Volume{{ 263 | Name: "rust-data", 264 | VolumeSource: corev1.VolumeSource{ 265 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 266 | ClaimName: "", // This gets set to server name (m.Name) 267 | }, 268 | }, 269 | }}, 270 | Containers: []corev1.Container{{ 271 | Name: "rust", 272 | Image: "didstopia/rust-server:latest", 273 | Ports: []corev1.ContainerPort{ 274 | {ContainerPort: 30015, Protocol: corev1.ProtocolTCP}, 275 | {ContainerPort: 30015, Protocol: corev1.ProtocolUDP}, 276 | {ContainerPort: 30016, Protocol: corev1.ProtocolTCP}, 277 | {ContainerPort: 8080, Protocol: corev1.ProtocolTCP}, 278 | {ContainerPort: 8080, Protocol: corev1.ProtocolUDP}, 279 | }, 280 | VolumeMounts: []corev1.VolumeMount{ 281 | {Name: "rust-data", 
MountPath: "/steamcmd/rust"}, 282 | }, 283 | ImagePullPolicy: corev1.PullIfNotPresent, 284 | }}, 285 | }, 286 | }, 287 | }, 288 | }, 289 | Service: corev1.Service{ 290 | ObjectMeta: metav1.ObjectMeta{ 291 | Name: "rust", 292 | }, 293 | Spec: corev1.ServiceSpec{ 294 | Ports: []corev1.ServicePort{ 295 | {Name: "30015-tcp", Port: 30015, NodePort: 30015, TargetPort: intstr.IntOrString{Type: 0, IntVal: 30015, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 296 | {Name: "30015-udp", Port: 30015, NodePort: 30015, TargetPort: intstr.IntOrString{Type: 0, IntVal: 30015, StrVal: ""}, Protocol: corev1.ProtocolUDP}, 297 | {Name: "30016-tcp", Port: 30016, NodePort: 30016, TargetPort: intstr.IntOrString{Type: 0, IntVal: 30016, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 298 | {Name: "8080-tcp", Port: 8080, NodePort: 30080, TargetPort: intstr.IntOrString{Type: 0, IntVal: 8080, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 299 | {Name: "8080-udp", Port: 8080, NodePort: 30080, TargetPort: intstr.IntOrString{Type: 0, IntVal: 8080, StrVal: ""}, Protocol: corev1.ProtocolUDP}, 300 | }, 301 | Type: corev1.ServiceTypeNodePort, 302 | }, 303 | }, 304 | PersistentVolumeClaim: corev1.PersistentVolumeClaim{ 305 | ObjectMeta: metav1.ObjectMeta{ 306 | Name: "rust", 307 | }, 308 | Spec: corev1.PersistentVolumeClaimSpec{ 309 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 310 | }, 311 | }, 312 | }, 313 | Minecraft: {Deployment: appsv1.Deployment{ 314 | Spec: appsv1.DeploymentSpec{ 315 | Replicas: func(val int32) *int32 { return &val }(1), 316 | Template: corev1.PodTemplateSpec{ 317 | ObjectMeta: metav1.ObjectMeta{}, 318 | Spec: corev1.PodSpec{ 319 | Volumes: []corev1.Volume{{ 320 | Name: "minecraft-data", 321 | VolumeSource: corev1.VolumeSource{ 322 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ 323 | ClaimName: "", // This gets set to server name (m.Name) 324 | }, 325 | }, 326 | }}, 327 | Containers: []corev1.Container{{ 328 | Name: "minecraft", 329 | 
Image: "itzg/minecraft-server:latest", 330 | Ports: []corev1.ContainerPort{ 331 | {ContainerPort: 25565, Protocol: corev1.ProtocolTCP}, 332 | {ContainerPort: 2375, Protocol: corev1.ProtocolTCP}, 333 | }, 334 | VolumeMounts: []corev1.VolumeMount{ 335 | {Name: "minecraft-data", MountPath: "/data"}, 336 | }, 337 | ImagePullPolicy: corev1.PullIfNotPresent, 338 | }}, 339 | }, 340 | }, 341 | }, 342 | }, 343 | Service: corev1.Service{ 344 | ObjectMeta: metav1.ObjectMeta{ 345 | Name: "minecraft", 346 | }, 347 | Spec: corev1.ServiceSpec{ 348 | Ports: []corev1.ServicePort{ 349 | {Name: "25565-tcp", Port: 25565, NodePort: 30565, TargetPort: intstr.IntOrString{Type: 0, IntVal: 25565, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 350 | {Name: "25575-tcp", Port: 25575, NodePort: 30575, TargetPort: intstr.IntOrString{Type: 0, IntVal: 25575, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 351 | }, 352 | Type: corev1.ServiceTypeNodePort, 353 | }, 354 | }, 355 | PersistentVolumeClaim: corev1.PersistentVolumeClaim{ 356 | ObjectMeta: metav1.ObjectMeta{ 357 | Name: "minecraft", 358 | }, 359 | Spec: corev1.PersistentVolumeClaimSpec{ 360 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 361 | }, 362 | }, 363 | }, 364 | } 365 | ) 366 | -------------------------------------------------------------------------------- /api/v1alpha1/server_webhook.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Martin Heinz. 
3 | */ 4 | 5 | package v1alpha1 6 | 7 | import ( 8 | corev1 "k8s.io/api/core/v1" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/apimachinery/pkg/api/resource" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | "k8s.io/apimachinery/pkg/util/validation/field" 14 | "reflect" 15 | ctrl "sigs.k8s.io/controller-runtime" 16 | logf "sigs.k8s.io/controller-runtime/pkg/log" 17 | "sigs.k8s.io/controller-runtime/pkg/webhook" 18 | ) 19 | 20 | // log is for logging in this package. 21 | var serverlog = logf.Log.WithName("server-resource") 22 | 23 | func (r *Server) SetupWebhookWithManager(mgr ctrl.Manager) error { 24 | return ctrl.NewWebhookManagedBy(mgr). 25 | For(r). 26 | Complete() 27 | } 28 | 29 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 30 | 31 | // +kubebuilder:webhook:path=/mutate-gameserver-martinheinz-dev-v1alpha1-server,mutating=true,failurePolicy=Ignore,sideEffects=None,groups=gameserver.martinheinz.dev,resources=servers,verbs=create;update,versions=v1alpha1,name=mserver.kb.io,admissionReviewVersions={v1,v1beta1} 32 | 33 | var _ webhook.Defaulter = &Server{} 34 | 35 | // Default implements webhook.Defaulter so a webhook will be registered for the type 36 | func (r *Server) Default() { 37 | serverlog.Info("default", "name", r.Name) 38 | 39 | // Note: If not assignment is made here, then "webhook returned response.patchType but not response.patch" is thrown 40 | // Therefore failurePolicy=Ignore, otherwise no resource gets admitted 41 | if r.Spec.ResourceRequirements == nil { 42 | r.Spec.ResourceRequirements = &corev1.ResourceRequirements{ 43 | Requests: corev1.ResourceList{ 44 | corev1.ResourceCPU: resource.MustParse("128m"), 45 | corev1.ResourceMemory: resource.MustParse("64Mi"), 46 | }, 47 | Limits: corev1.ResourceList{ 48 | corev1.ResourceCPU: resource.MustParse("1"), 49 | corev1.ResourceMemory: resource.MustParse("1Gi"), 50 | }, 51 | } 52 | } 53 | 54 | if r.Spec.Ports == nil { 55 | r.Spec.Ports = 
Games[r.Spec.GameName].Service.Spec.Ports 56 | } 57 | } 58 | 59 | // +kubebuilder:webhook:path=/validate-gameserver-martinheinz-dev-v1alpha1-server,mutating=false,failurePolicy=fail,sideEffects=None,groups=gameserver.martinheinz.dev,resources=servers,verbs=create;update,versions=v1alpha1,name=vserver.kb.io,admissionReviewVersions={v1,v1beta1} 60 | 61 | var _ webhook.Validator = &Server{} 62 | 63 | // ValidateCreate implements webhook.Validator so a webhook will be registered for the type 64 | func (r *Server) ValidateCreate() error { 65 | serverlog.Info("validate create", "name", r.Name) 66 | var allErrs field.ErrorList 67 | 68 | // Validation logic on object creation 69 | if len(r.Spec.Config.From) == 0 { 70 | serverlog.Info("validate create - Environment configuration missing", "name", r.Name) 71 | allErrs = append(allErrs, field.Required(field.NewPath("spec").Child("envFrom"), "Environment configuration is required")) 72 | } else if r.Spec.Config.MountAs == File && r.Spec.Config.MountPath == "" { 73 | serverlog.Info("validate create - MountPath is required when MountAs: File is specified", "name", r.Name) 74 | allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("envFrom").Child("MountPath"), "MountPath is required when MountAs: File is specified")) 75 | } else { 76 | return nil 77 | } 78 | return apierrors.NewInvalid( 79 | schema.GroupKind{Group: "gameserver.martinheinz.dev", Kind: "Server"}, 80 | r.Name, allErrs) 81 | } 82 | 83 | // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type 84 | func (r *Server) ValidateUpdate(old runtime.Object) error { 85 | serverlog.Info("validate update", "name", r.Name) 86 | return r.enforceImmutability(old) 87 | } 88 | 89 | func (r *Server) enforceImmutability(old runtime.Object) error { 90 | var allErrs field.ErrorList 91 | errorMessage := "Field is immutable" 92 | oldServer := (old).(*Server) 93 | 94 | portsPath := field.NewPath("spec").Child("ports") 95 | if 
len(oldServer.Spec.Ports) != len(r.Spec.Ports) { 96 | allErrs = append(allErrs, field.Forbidden(portsPath, errorMessage)) 97 | } else if len(r.Spec.Ports) > 0 { 98 | for i, ports := range r.Spec.Ports { 99 | if ports.Port != oldServer.Spec.Ports[i].Port { 100 | allErrs = append(allErrs, field.Forbidden(portsPath.Child("port"), errorMessage)) 101 | } 102 | if ports.TargetPort != oldServer.Spec.Ports[i].TargetPort { 103 | allErrs = append(allErrs, field.Forbidden(portsPath.Child("targetPort"), errorMessage)) 104 | } 105 | } 106 | } 107 | 108 | if !reflect.DeepEqual(oldServer.Spec.Storage, r.Spec.Storage) { 109 | allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("storage"), errorMessage)) 110 | } 111 | 112 | if oldServer.Spec.GameName != r.Spec.GameName { 113 | allErrs = append(allErrs, field.Forbidden(field.NewPath("spec").Child("gameName"), errorMessage)) 114 | } 115 | 116 | if !reflect.DeepEqual(oldServer.Spec.Config.MountAs, r.Spec.Config.MountAs) { 117 | allErrs = append(allErrs, field.Forbidden(field. 118 | NewPath("spec"). 119 | Child("EnvFrom"). 120 | Child("MountAs"), errorMessage)) 121 | } 122 | if len(allErrs) == 0 { 123 | return nil 124 | } 125 | return apierrors.NewInvalid( 126 | schema.GroupKind{Group: "gameserver.martinheinz.dev", Kind: "Server"}, 127 | r.Name, allErrs) 128 | } 129 | 130 | // ValidateDelete implements webhook.Validator so a webhook will be registered for the type 131 | func (r *Server) ValidateDelete() error { 132 | serverlog.Info("validate delete", "name", r.Name) 133 | 134 | return nil 135 | } 136 | -------------------------------------------------------------------------------- /api/v1alpha1/webhook_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Martin Heinz. 3 | */ 4 | 5 | package v1alpha1 6 | 7 | import ( 8 | "context" 9 | "crypto/tls" 10 | "fmt" 11 | "net" 12 | "path/filepath" 13 | "testing" 14 | "time" 15 | 16 | . 
"github.com/onsi/ginkgo" 17 | . "github.com/onsi/gomega" 18 | 19 | admissionv1beta1 "k8s.io/api/admission/v1beta1" 20 | // +kubebuilder:scaffold:imports 21 | "k8s.io/apimachinery/pkg/runtime" 22 | "k8s.io/client-go/rest" 23 | ctrl "sigs.k8s.io/controller-runtime" 24 | "sigs.k8s.io/controller-runtime/pkg/client" 25 | "sigs.k8s.io/controller-runtime/pkg/envtest" 26 | "sigs.k8s.io/controller-runtime/pkg/envtest/printer" 27 | logf "sigs.k8s.io/controller-runtime/pkg/log" 28 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 29 | ) 30 | 31 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 32 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 33 | 34 | var cfg *rest.Config 35 | var k8sClient client.Client 36 | var testEnv *envtest.Environment 37 | var ctx context.Context 38 | var cancel context.CancelFunc 39 | 40 | func TestAPIs(t *testing.T) { 41 | RegisterFailHandler(Fail) 42 | 43 | RunSpecsWithDefaultAndCustomReporters(t, 44 | "Webhook Suite", 45 | []Reporter{printer.NewlineReporter{}}) 46 | } 47 | 48 | var _ = BeforeSuite(func() { 49 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 50 | 51 | ctx, cancel = context.WithCancel(context.TODO()) 52 | 53 | By("bootstrapping test environment") 54 | testEnv = &envtest.Environment{ 55 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 56 | WebhookInstallOptions: envtest.WebhookInstallOptions{ 57 | Paths: []string{filepath.Join("..", "..", "config", "webhook")}, 58 | }, 59 | } 60 | 61 | cfg, err := testEnv.Start() 62 | Expect(err).NotTo(HaveOccurred()) 63 | Expect(cfg).NotTo(BeNil()) 64 | 65 | scheme := runtime.NewScheme() 66 | err = AddToScheme(scheme) 67 | Expect(err).NotTo(HaveOccurred()) 68 | 69 | err = admissionv1beta1.AddToScheme(scheme) 70 | Expect(err).NotTo(HaveOccurred()) 71 | 72 | // +kubebuilder:scaffold:scheme 73 | 74 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) 75 | Expect(err).NotTo(HaveOccurred()) 76 | 
Expect(k8sClient).NotTo(BeNil()) 77 | 78 | // start webhook server using Manager 79 | webhookInstallOptions := &testEnv.WebhookInstallOptions 80 | mgr, err := ctrl.NewManager(cfg, ctrl.Options{ 81 | Scheme: scheme, 82 | Host: webhookInstallOptions.LocalServingHost, 83 | Port: webhookInstallOptions.LocalServingPort, 84 | CertDir: webhookInstallOptions.LocalServingCertDir, 85 | LeaderElection: false, 86 | MetricsBindAddress: "0", 87 | }) 88 | Expect(err).NotTo(HaveOccurred()) 89 | 90 | err = (&Server{}).SetupWebhookWithManager(mgr) 91 | Expect(err).NotTo(HaveOccurred()) 92 | 93 | // +kubebuilder:scaffold:webhook 94 | 95 | go func() { 96 | err = mgr.Start(ctx) 97 | if err != nil { 98 | Expect(err).NotTo(HaveOccurred()) 99 | } 100 | }() 101 | 102 | // wait for the webhook server to get ready 103 | dialer := &net.Dialer{Timeout: time.Second} 104 | addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) 105 | Eventually(func() error { 106 | conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) 107 | if err != nil { 108 | return err 109 | } 110 | conn.Close() 111 | return nil 112 | }).Should(Succeed()) 113 | 114 | }, 60) 115 | 116 | var _ = AfterSuite(func() { 117 | cancel() 118 | By("tearing down the test environment") 119 | err := testEnv.Stop() 120 | Expect(err).NotTo(HaveOccurred()) 121 | }) 122 | -------------------------------------------------------------------------------- /api/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | // +build !ignore_autogenerated 2 | 3 | /* 4 | Copyright 2021 Martin Heinz. 5 | */ 6 | 7 | // Code generated by controller-gen. DO NOT EDIT. 8 | 9 | package v1alpha1 10 | 11 | import ( 12 | "k8s.io/api/core/v1" 13 | "k8s.io/apimachinery/pkg/runtime" 14 | ) 15 | 16 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil.
func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	if in.From != nil {
		in, out := &in.From, &out.From
		*out = make([]v1.EnvFromSource, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
func (in *Config) DeepCopy() *Config {
	if in == nil {
		return nil
	}
	out := new(Config)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Server) DeepCopyInto(out *Server) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Server.
func (in *Server) DeepCopy() *Server {
	if in == nil {
		return nil
	}
	out := new(Server)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *Server) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerList) DeepCopyInto(out *ServerList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]Server, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerList.
func (in *ServerList) DeepCopy() *ServerList {
	if in == nil {
		return nil
	}
	out := new(ServerList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServerList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerSpec) DeepCopyInto(out *ServerSpec) {
	*out = *in
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]v1.ServicePort, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	in.Config.DeepCopyInto(&out.Config)
	if in.Storage != nil {
		in, out := &in.Storage, &out.Storage
		*out = new(ServerStorage)
		**out = **in
	}
	if in.ResourceRequirements != nil {
		in, out := &in.ResourceRequirements, &out.ResourceRequirements
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerSpec.
func (in *ServerSpec) DeepCopy() *ServerSpec {
	if in == nil {
		return nil
	}
	out := new(ServerSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerStatus) DeepCopyInto(out *ServerStatus) {
	*out = *in
	if in.Ports != nil {
		in, out := &in.Ports, &out.Ports
		*out = make([]int32, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStatus.
func (in *ServerStatus) DeepCopy() *ServerStatus {
	if in == nil {
		return nil
	}
	out := new(ServerStatus)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerStorage) DeepCopyInto(out *ServerStorage) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerStorage.
func (in *ServerStorage) DeepCopy() *ServerStorage {
	if in == nil {
		return nil
	}
	out := new(ServerStorage)
	in.DeepCopyInto(out)
	return out
}

// NOTE(review): this file is generated by controller-gen (see the "DO NOT EDIT"
// header); to change deepcopy behavior, edit the API types and regenerate —
// presumably via `make generate`, TODO confirm the Makefile target.
--------------------------------------------------------------------------------
/config/certmanager/certificate.yaml:
--------------------------------------------------------------------------------
# The following manifests contain a self-signed issuer CR and a certificate CR.
# More documentation can be found at https://docs.cert-manager.io
# WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
4 | apiVersion: cert-manager.io/v1 5 | kind: Issuer 6 | metadata: 7 | name: selfsigned-issuer 8 | namespace: system 9 | spec: 10 | selfSigned: {} 11 | --- 12 | apiVersion: cert-manager.io/v1 13 | kind: Certificate 14 | metadata: 15 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 16 | namespace: system 17 | spec: 18 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 19 | dnsNames: 20 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 22 | issuerRef: 23 | kind: Issuer 24 | name: selfsigned-issuer 25 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 26 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd/bases/gameserver.martinheinz.dev_servers.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 
6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.4.1 8 | creationTimestamp: null 9 | name: servers.gameserver.martinheinz.dev 10 | spec: 11 | group: gameserver.martinheinz.dev 12 | names: 13 | kind: Server 14 | listKind: ServerList 15 | plural: servers 16 | singular: server 17 | scope: Namespaced 18 | versions: 19 | - additionalPrinterColumns: 20 | - description: Status of deployment 21 | jsonPath: .status.status 22 | name: status 23 | type: string 24 | - description: Status of the reconcile condition 25 | jsonPath: .status.storage 26 | name: Storage 27 | type: string 28 | - description: Age of the resource 29 | jsonPath: .metadata.creationTimestamp 30 | name: Age 31 | type: date 32 | - description: List of available server NodePorts 33 | jsonPath: .status.ports 34 | name: Ports 35 | priority: 10 36 | type: string 37 | name: v1alpha1 38 | schema: 39 | openAPIV3Schema: 40 | description: Server is the Schema for the servers API 41 | properties: 42 | apiVersion: 43 | description: 'APIVersion defines the versioned schema of this representation 44 | of an object. Servers should convert recognized schemas to the latest 45 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 46 | type: string 47 | kind: 48 | description: 'Kind is a string value representing the REST resource this 49 | object represents. Servers may infer this from the endpoint the client 50 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 51 | type: string 52 | metadata: 53 | type: object 54 | spec: 55 | description: ServerSpec defines the desired state of Server 56 | properties: 57 | config: 58 | properties: 59 | from: 60 | items: 61 | description: EnvFromSource represents the source of a set of 62 | ConfigMaps 63 | properties: 64 | configMapRef: 65 | description: The ConfigMap to select from 66 | properties: 67 | name: 68 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names 69 | TODO: Add other useful fields. apiVersion, kind, uid?' 70 | type: string 71 | optional: 72 | description: Specify whether the ConfigMap must be defined 73 | type: boolean 74 | type: object 75 | prefix: 76 | description: An optional identifier to prepend to each key 77 | in the ConfigMap. Must be a C_IDENTIFIER. 78 | type: string 79 | secretRef: 80 | description: The Secret to select from 81 | properties: 82 | name: 83 | description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names 84 | TODO: Add other useful fields. apiVersion, kind, uid?' 85 | type: string 86 | optional: 87 | description: Specify whether the Secret must be defined 88 | type: boolean 89 | type: object 90 | type: object 91 | type: array 92 | x-kubernetes-list-type: atomic 93 | mountAs: 94 | default: Env 95 | enum: 96 | - File 97 | - Env 98 | type: string 99 | mountPath: 100 | type: string 101 | type: object 102 | gameName: 103 | enum: 104 | - CSGO 105 | - Factorio 106 | - Rust 107 | - Minecraft 108 | type: string 109 | port: 110 | items: 111 | description: ServicePort contains information on service's port. 112 | properties: 113 | appProtocol: 114 | description: The application protocol for this port. This field 115 | follows standard Kubernetes label syntax. 
Un-prefixed names 116 | are reserved for IANA standard service names (as per RFC-6335 117 | and http://www.iana.org/assignments/service-names). Non-standard 118 | protocols should use prefixed names such as mycompany.com/my-custom-protocol. 119 | This is a beta field that is guarded by the ServiceAppProtocol 120 | feature gate and enabled by default. 121 | type: string 122 | name: 123 | description: The name of this port within the service. This 124 | must be a DNS_LABEL. All ports within a ServiceSpec must have 125 | unique names. When considering the endpoints for a Service, 126 | this must match the 'name' field in the EndpointPort. Optional 127 | if only one ServicePort is defined on this service. 128 | type: string 129 | nodePort: 130 | description: 'The port on each node on which this service is 131 | exposed when type=NodePort or LoadBalancer. Usually assigned 132 | by the system. If specified, it will be allocated to the service 133 | if unused or else creation of the service will fail. Default 134 | is to auto-allocate a port if the ServiceType of this Service 135 | requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport' 136 | format: int32 137 | type: integer 138 | port: 139 | description: The port that will be exposed by this service. 140 | format: int32 141 | type: integer 142 | protocol: 143 | default: TCP 144 | description: The IP protocol for this port. Supports "TCP", 145 | "UDP", and "SCTP". Default is TCP. 146 | type: string 147 | targetPort: 148 | anyOf: 149 | - type: integer 150 | - type: string 151 | description: 'Number or name of the port to access on the pods 152 | targeted by the service. Number must be in the range 1 to 153 | 65535. Name must be an IANA_SVC_NAME. If this is a string, 154 | it will be looked up as a named port in the target Pod''s 155 | container ports. If this is not specified, the value of the 156 | ''port'' field is used (an identity map). 
This field is ignored 157 | for services with clusterIP=None, and should be omitted or 158 | set equal to the ''port'' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service' 159 | x-kubernetes-int-or-string: true 160 | required: 161 | - port 162 | type: object 163 | type: array 164 | x-kubernetes-list-type: atomic 165 | resources: 166 | description: ResourceRequirements describes the compute resource requirements. 167 | properties: 168 | limits: 169 | additionalProperties: 170 | anyOf: 171 | - type: integer 172 | - type: string 173 | pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ 174 | x-kubernetes-int-or-string: true 175 | description: 'Limits describes the maximum amount of compute resources 176 | allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' 177 | type: object 178 | requests: 179 | additionalProperties: 180 | anyOf: 181 | - type: integer 182 | - type: string 183 | pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ 184 | x-kubernetes-int-or-string: true 185 | description: 'Requests describes the minimum amount of compute 186 | resources required. If Requests is omitted for a container, 187 | it defaults to Limits if that is explicitly specified, otherwise 188 | to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/' 189 | type: object 190 | type: object 191 | storage: 192 | description: ServerStorage ... 
193 | properties: 194 | size: 195 | pattern: ^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$ 196 | type: string 197 | type: object 198 | required: 199 | - gameName 200 | type: object 201 | status: 202 | description: ServerStatus defines the observed state of Server 203 | properties: 204 | ports: 205 | description: List of available server NodePorts 206 | items: 207 | format: int32 208 | type: integer 209 | type: array 210 | status: 211 | description: Describes whether there are running pods for this server 212 | enum: 213 | - Active 214 | - Inactive 215 | type: string 216 | storage: 217 | description: Describes whether storage for server is ready 218 | enum: 219 | - Bound 220 | - Pending 221 | type: string 222 | type: object 223 | type: object 224 | served: true 225 | storage: true 226 | subresources: 227 | status: {} 228 | status: 229 | acceptedNames: 230 | kind: "" 231 | plural: "" 232 | conditions: [] 233 | storedVersions: [] 234 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/gameserver.martinheinz.dev_servers.yaml 6 | # +kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patchesStrategicMerge: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- patches/webhook_in_servers.yaml 12 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 13 | 14 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
15 | # patches here are for enabling the CA injection for each CRD 16 | #- patches/cainjection_in_servers.yaml 17 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 18 | 19 | # the following config is for teaching kustomize how to do kustomization for CRDs. 20 | configurations: 21 | - kustomizeconfig.yaml 22 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_servers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: servers.gameserver.martinheinz.dev 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_servers.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: 
servers.gameserver.martinheinz.dev 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: game-server-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: game-server-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | - ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | - ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 
31 | - manager_auth_proxy_patch.yaml 32 | 33 | # Mount the controller config file for loading manager configurations 34 | # through a ComponentConfig type 35 | #- manager_config_patch.yaml 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | - manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | - webhookcainjection_patch.yaml 45 | 46 | # the following config is for teaching kustomize how to do var substitution 47 | vars: 48 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 49 | - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 50 | objref: 51 | kind: Certificate 52 | group: cert-manager.io 53 | version: v1 54 | name: serving-cert # this name should match the one in certificate.yaml 55 | fieldref: 56 | fieldpath: metadata.namespace 57 | - name: CERTIFICATE_NAME 58 | objref: 59 | kind: Certificate 60 | group: cert-manager.io 61 | version: v1 62 | name: serving-cert # this name should match the one in certificate.yaml 63 | - name: SERVICE_NAMESPACE # namespace of the service 64 | objref: 65 | kind: Service 66 | version: v1 67 | name: webhook-service 68 | fieldref: 69 | fieldpath: metadata.namespace 70 | - name: SERVICE_NAME 71 | objref: 72 | kind: Service 73 | version: v1 74 | name: webhook-service 75 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using 
SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=10" 19 | ports: 20 | - containerPort: 8443 21 | name: https 22 | - name: manager 23 | args: 24 | - "--health-probe-bind-address=:8081" 25 | - "--metrics-bind-address=127.0.0.1:8080" 26 | - "--leader-elect" 27 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: cert 18 | readOnly: true 19 | volumes: 20 | - name: cert 21 | secret: 22 | defaultMode: 420 23 | secretName: webhook-server-cert 24 | 
-------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch adds an annotation to the admission webhook config and 2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 3 | apiVersion: admissionregistration.k8s.io/v1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | name: mutating-webhook-configuration 7 | annotations: 8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 9 | --- 10 | apiVersion: admissionregistration.k8s.io/v1 11 | kind: ValidatingWebhookConfiguration 12 | metadata: 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /config/kind/kind-config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | kubeadmConfigPatches: 6 | - | 7 | kind: InitConfiguration 8 | nodeRegistration: 9 | kubeletExtraArgs: 10 | node-labels: "ingress-ready=true" 11 | extraMounts: 12 | - hostPath: ./shared-storage 13 | containerPath: /var/local-path-provisioner 14 | extraPortMappings: 15 | - containerPort: 80 16 | hostPort: 80 17 | protocol: TCP 18 | - containerPort: 443 19 | hostPort: 443 20 | protocol: TCP 21 | # If more nodes are needed, then refer to https://github.com/kubernetes-sigs/kind/issues/1660#issuecomment-641354564 -------------------------------------------------------------------------------- /config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 |
healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: 0fb7271b.martinheinz.dev 12 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | 7 | configMapGenerator: 8 | - files: 9 | - controller_manager_config.yaml 10 | name: manager-config 11 | apiVersion: kustomize.config.k8s.io/v1beta1 12 | kind: Kustomization 13 | images: 14 | - name: controller 15 | newName: docker.io/martinheinz/game-server-operator 16 | newTag: latest 17 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | labels: 23 | control-plane: controller-manager 24 | spec: 25 | securityContext: 26 | runAsUser: 65532 27 | containers: 28 | - command: 29 | - /manager 30 | args: 31 | - --leader-elect 32 | image: controller:latest 33 | imagePullPolicy: Always 34 | name: manager 35 | securityContext: 36 | allowPrivilegeEscalation: false 37 | livenessProbe: 38 | httpGet: 39 | path: /healthz 40 | port: 8081 41 | initialDelaySeconds: 15 42 | periodSeconds: 20 43 | readinessProbe: 44 | httpGet: 45 | path: /readyz 46 | port: 8081 47 | initialDelaySeconds: 5 48 | periodSeconds: 10 49 | resources: 50 | limits: 51 | 
cpu: 100m 52 | memory: 30Mi 53 | requests: 54 | cpu: 100m 55 | memory: 20Mi 56 | terminationGracePeriodSeconds: 10 57 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | selector: 15 | matchLabels: 16 | control-plane: controller-manager 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: ["/metrics"] 7 | verbs: ["get"] 8 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | # Comment the following 4 lines if you want to disable 7 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 8 | # which protects your /metrics endpoint. 9 | - auth_proxy_service.yaml 10 | - auth_proxy_role.yaml 11 | - auth_proxy_role_binding.yaml 12 | - auth_proxy_client_clusterrole.yaml 13 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | - coordination.k8s.io 10 | resources: 11 | - configmaps 12 | - leases 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - create 18 | - update 19 | - patch 20 | - delete 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - events 25 | verbs: 26 | - create 27 | - patch 28 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 7 | name: manager-role 8 | rules: 9 | - apiGroups: 10 | - apps 11 | resources: 12 | - deployments 13 | verbs: 14 | - create 15 | - delete 16 | - get 17 | - list 18 | - patch 19 | - update 20 | - watch 21 | - apiGroups: 22 | - apps 23 | resources: 24 | - deployments 25 | - services 26 | verbs: 27 | - create 28 | - delete 29 | - get 30 | - list 31 | - patch 32 | - update 33 | - watch 34 | - apiGroups: 35 | - "" 36 | resources: 37 | - persistentvolumeclaims 38 | - services 39 | verbs: 40 | - create 41 | - delete 42 | - get 43 | - list 44 | - patch 45 | - update 46 | - watch 47 | - apiGroups: 48 | - "" 49 | resources: 50 | - pods 51 | verbs: 52 | - get 53 | - list 54 | - watch 55 | - apiGroups: 56 | - gameserver.martinheinz.dev 57 | resources: 58 | 
- servers 59 | verbs: 60 | - create 61 | - delete 62 | - get 63 | - list 64 | - patch 65 | - update 66 | - watch 67 | - apiGroups: 68 | - gameserver.martinheinz.dev 69 | resources: 70 | - servers/finalizers 71 | verbs: 72 | - update 73 | - apiGroups: 74 | - gameserver.martinheinz.dev 75 | resources: 76 | - servers/status 77 | verbs: 78 | - get 79 | - patch 80 | - update 81 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/server_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit servers. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: server-editor-role 6 | rules: 7 | - apiGroups: 8 | - gameserver.martinheinz.dev 9 | resources: 10 | - servers 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - gameserver.martinheinz.dev 21 | resources: 22 | - servers/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/server_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view servers. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: server-viewer-role 6 | rules: 7 | - apiGroups: 8 | - gameserver.martinheinz.dev 9 | resources: 10 | - servers 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - gameserver.martinheinz.dev 17 | resources: 18 | - servers/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/samples/csgo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: csgo 5 | namespace: default 6 | data: 7 | RCON_PASSWORD: cmNvbnBhc3N3b3Jk 8 | SERVER_PASSWORD: "" 9 | STEAM_ACCOUNT: Q0VFMTVDNjk4RDNGODUxOUM2OTRDMEU3NzREOTdDREE= 10 | type: Opaque 11 | --- 12 | apiVersion: v1 13 | kind: ConfigMap 14 | metadata: 15 | name: csgo 16 | namespace: default 17 | data: 18 | SERVER_HOSTNAME: csgo.default.svc.cluster.local 19 | LAN: "1" 20 | --- 21 | apiVersion: gameserver.martinheinz.dev/v1alpha1 22 | kind: Server 23 | metadata: 24 | name: csgo 25 | spec: 26 | gameName: "CSGO" 27 | config: 28 | from: 29 | - configMapRef: 30 | name: csgo 31 | - secretRef: 32 | name: csgo 33 | storage: 34 | size: 12Gi 35 | resources: 36 | requests: 37 | memory: "64Mi" 38 | cpu: "250m" 39 | limits: 40 | memory: "1Gi" 41 | cpu: "2" 42 | -------------------------------------------------------------------------------- /config/samples/factorio.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: factorio 5 | namespace: default 6 | data: 7 | rconpw: cmNvbnB3 # Project this into /factorio/config/rconpw 8 | type: Opaque 9 | --- 10 | apiVersion: v1 11 | kind: ConfigMap 12 | metadata: 13 | name: factorio 14 | namespace: default 15 | data: 16 | server-whitelist.json: '["you", "friend"]' 17 | map-gen-settings.json: > 18 | { 19 | "_terrain_segmentation_comment": "The inverse 
of 'water scale' in the map generator GUI.", 20 | "terrain_segmentation": 1, 21 | 22 | "_water_comment": 23 | [ 24 | "The equivalent to 'water coverage' in the map generator GUI. Higher coverage means more water in larger oceans.", 25 | "Water level = 10 * log2(this value)" 26 | ], 27 | "water": 1, 28 | 29 | "_comment_width+height": "Width and height of map, in tiles; 0 means infinite", 30 | "width": 0, 31 | "height": 0, 32 | 33 | "_starting_area_comment": "Multiplier for 'biter free zone radius'", 34 | "starting_area": 1, 35 | 36 | "peaceful_mode": false, 37 | "autoplace_controls": 38 | { 39 | "coal": {"frequency": 1, "size": 1, "richness": 1}, 40 | "stone": {"frequency": 1, "size": 1, "richness": 1}, 41 | "copper-ore": {"frequency": 1, "size": 1,"richness": 1}, 42 | "iron-ore": {"frequency": 1, "size": 1, "richness": 1}, 43 | "uranium-ore": {"frequency": 1, "size": 1, "richness": 1}, 44 | "crude-oil": {"frequency": 1, "size": 1, "richness": 1}, 45 | "trees": {"frequency": 1, "size": 1, "richness": 1}, 46 | "enemy-base": {"frequency": 1, "size": 1, "richness": 1} 47 | }, 48 | 49 | "cliff_settings": 50 | { 51 | "_name_comment": "Name of the cliff prototype", 52 | "name": "cliff", 53 | 54 | "_cliff_elevation_0_comment": "Elevation of first row of cliffs", 55 | "cliff_elevation_0": 10, 56 | 57 | "_cliff_elevation_interval_comment": 58 | [ 59 | "Elevation difference between successive rows of cliffs.", 60 | "This is inversely proportional to 'frequency' in the map generation GUI. Specifically, when set from the GUI the value is 40 / frequency." 61 | ], 62 | "cliff_elevation_interval": 40, 63 | 64 | "_richness_comment": "Called 'cliff continuity' in the map generator GUI. 
0 will result in no cliffs, 10 will make all cliff rows completely solid", 65 | "richness": 1 66 | }, 67 | 68 | "_property_expression_names_comment": 69 | [ 70 | "Overrides for property value generators (map type)", 71 | "Leave 'elevation' blank to get 'normal' terrain.", 72 | "Use 'elevation': '0_16-elevation' to reproduce terrain from 0.16.", 73 | "Use 'elevation': '0_17-island' to get an island.", 74 | "Moisture and terrain type are also controlled via this.", 75 | "'control-setting:moisture:frequency:multiplier' is the inverse of the 'moisture scale' in the map generator GUI.", 76 | "'control-setting:moisture:bias' is the 'moisture bias' in the map generator GUI.", 77 | "'control-setting:aux:frequency:multiplier' is the inverse of the 'terrain type scale' in the map generator GUI.", 78 | "'control-setting:aux:bias' is the 'terrain type bias' in the map generator GUI." 79 | ], 80 | "property_expression_names": 81 | { 82 | "control-setting:moisture:frequency:multiplier": "1", 83 | "control-setting:moisture:bias": "0", 84 | "control-setting:aux:frequency:multiplier": "1", 85 | "control-setting:aux:bias": "0" 86 | }, 87 | 88 | "starting_points": 89 | [ 90 | { "x": 0, "y": 0} 91 | ], 92 | 93 | "_seed_comment": "Use null for a random seed, number for a specific seed.", 94 | "seed": null 95 | } 96 | map-settings.json: > 97 | { 98 | "difficulty_settings": 99 | { 100 | "recipe_difficulty": 0, 101 | "technology_difficulty": 0, 102 | "technology_price_multiplier": 1, 103 | "research_queue_setting": "after-victory" 104 | }, 105 | "pollution": 106 | { 107 | "enabled": true, 108 | "_comment_min_to_diffuse_1": "these are values for 60 ticks (1 simulated second)", 109 | "_comment_min_to_diffuse_2": "amount that is diffused to neighboring chunk", 110 | "diffusion_ratio": 0.02, 111 | "min_to_diffuse": 15, 112 | "ageing": 1, 113 | "expected_max_per_chunk": 150, 114 | "min_to_show_per_chunk": 50, 115 | "min_pollution_to_damage_trees": 60, 116 | "pollution_with_max_forest_damage": 
150, 117 | "pollution_per_tree_damage": 50, 118 | "pollution_restored_per_tree_damage": 10, 119 | "max_pollution_to_restore_trees": 20, 120 | "enemy_attack_pollution_consumption_modifier": 1 121 | }, 122 | "enemy_evolution": 123 | { 124 | "enabled": true, 125 | "time_factor": 0.000004, 126 | "destroy_factor": 0.002, 127 | "pollution_factor": 0.0000009 128 | }, 129 | "enemy_expansion": 130 | { 131 | "enabled": true, 132 | "min_base_spacing": 3, 133 | "max_expansion_distance": 7, 134 | "friendly_base_influence_radius": 2, 135 | "enemy_building_influence_radius": 2, 136 | "building_coefficient": 0.1, 137 | "other_base_coefficient": 2.0, 138 | "neighbouring_chunk_coefficient": 0.5, 139 | "neighbouring_base_chunk_coefficient": 0.4, 140 | "max_colliding_tiles_coefficient": 0.9, 141 | "settler_group_min_size": 5, 142 | "settler_group_max_size": 20, 143 | "min_expansion_cooldown": 14400, 144 | "max_expansion_cooldown": 216000 145 | }, 146 | "unit_group": 147 | { 148 | "min_group_gathering_time": 3600, 149 | "max_group_gathering_time": 36000, 150 | "max_wait_time_for_late_members": 7200, 151 | "max_group_radius": 30.0, 152 | "min_group_radius": 5.0, 153 | "max_member_speedup_when_behind": 1.4, 154 | "max_member_slowdown_when_ahead": 0.6, 155 | "max_group_slowdown_factor": 0.3, 156 | "max_group_member_fallback_factor": 3, 157 | "member_disown_distance": 10, 158 | "tick_tolerance_when_member_arrives": 60, 159 | "max_gathering_unit_groups": 30, 160 | "max_unit_group_size": 200 161 | }, 162 | "steering": 163 | { 164 | "default": 165 | { 166 | "radius": 1.2, 167 | "separation_force": 0.005, 168 | "separation_factor": 1.2, 169 | "force_unit_fuzzy_goto_behavior": false 170 | }, 171 | "moving": 172 | { 173 | "radius": 3, 174 | "separation_force": 0.01, 175 | "separation_factor": 3, 176 | "force_unit_fuzzy_goto_behavior": false 177 | } 178 | }, 179 | "path_finder": 180 | { 181 | "fwd2bwd_ratio": 5, 182 | "goal_pressure_ratio": 2, 183 | "max_steps_worked_per_tick": 100, 184 | 
"max_work_done_per_tick": 8000, 185 | "use_path_cache": true, 186 | "short_cache_size": 5, 187 | "long_cache_size": 25, 188 | "short_cache_min_cacheable_distance": 10, 189 | "short_cache_min_algo_steps_to_cache": 50, 190 | "long_cache_min_cacheable_distance": 30, 191 | "cache_max_connect_to_cache_steps_multiplier": 100, 192 | "cache_accept_path_start_distance_ratio": 0.2, 193 | "cache_accept_path_end_distance_ratio": 0.15, 194 | "negative_cache_accept_path_start_distance_ratio": 0.3, 195 | "negative_cache_accept_path_end_distance_ratio": 0.3, 196 | "cache_path_start_distance_rating_multiplier": 10, 197 | "cache_path_end_distance_rating_multiplier": 20, 198 | "stale_enemy_with_same_destination_collision_penalty": 30, 199 | "ignore_moving_enemy_collision_distance": 5, 200 | "enemy_with_different_destination_collision_penalty": 30, 201 | "general_entity_collision_penalty": 10, 202 | "general_entity_subsequent_collision_penalty": 3, 203 | "extended_collision_penalty": 3, 204 | "max_clients_to_accept_any_new_request": 10, 205 | "max_clients_to_accept_short_new_request": 100, 206 | "direct_distance_to_consider_short_request": 100, 207 | "short_request_max_steps": 1000, 208 | "short_request_ratio": 0.5, 209 | "min_steps_to_check_path_find_termination": 2000, 210 | "start_to_goal_cost_multiplier_to_terminate_path_find": 500.0, 211 | "overload_levels": [0, 100, 500], 212 | "overload_multipliers": [2, 3, 4], 213 | "negative_path_cache_delay_interval": 20 214 | }, 215 | "max_failed_behavior_count": 3 216 | } 217 | server-settings.json: > 218 | { 219 | "name": "Name of the game as it will appear in the game listing", 220 | "description": "Description of the game that will appear in the listing", 221 | "tags": ["game", "tags"], 222 | 223 | "_comment_max_players": "Maximum number of players allowed, admins can join even a full server. 
0 means unlimited.", 224 | "max_players": 0, 225 | 226 | "_comment_visibility": ["public: Game will be published on the official Factorio matching server", 227 | "lan: Game will be broadcast on LAN"], 228 | "visibility": 229 | { 230 | "public": true, 231 | "lan": true 232 | }, 233 | 234 | "_comment_credentials": "Your factorio.com login credentials. Required for games with visibility public", 235 | "username": "", 236 | "password": "", 237 | 238 | "_comment_token": "Authentication token. May be used instead of 'password' above.", 239 | "token": "", 240 | 241 | "game_password": "", 242 | 243 | "_comment_require_user_verification": "When set to true, the server will only allow clients that have a valid Factorio.com account", 244 | "require_user_verification": true, 245 | 246 | "_comment_max_upload_in_kilobytes_per_second" : "optional, default value is 0. 0 means unlimited.", 247 | "max_upload_in_kilobytes_per_second": 0, 248 | 249 | "_comment_max_upload_slots" : "optional, default value is 5. 0 means unlimited.", 250 | "max_upload_slots": 5, 251 | 252 | "_comment_minimum_latency_in_ticks": "optional one tick is 16ms in default speed, default value is 0. 
0 means no minimum.", 253 | "minimum_latency_in_ticks": 0, 254 | 255 | "_comment_ignore_player_limit_for_returning_players": "Players that played on this map already can join even when the max player limit was reached.", 256 | "ignore_player_limit_for_returning_players": false, 257 | 258 | "_comment_allow_commands": "possible values are, true, false and admins-only", 259 | "allow_commands": "admins-only", 260 | 261 | "_comment_autosave_interval": "Autosave interval in minutes", 262 | "autosave_interval": 10, 263 | 264 | "_comment_autosave_slots": "server autosave slots, it is cycled through when the server autosaves.", 265 | "autosave_slots": 5, 266 | 267 | "_comment_afk_autokick_interval": "How many minutes until someone is kicked when doing nothing, 0 for never.", 268 | "afk_autokick_interval": 0, 269 | 270 | "_comment_auto_pause": "Whether should the server be paused when no players are present.", 271 | "auto_pause": true, 272 | 273 | "only_admins_can_pause_the_game": true, 274 | 275 | "_comment_autosave_only_on_server": "Whether autosaves should be saved only on server or also on all connected clients. Default is true.", 276 | "autosave_only_on_server": true, 277 | 278 | "_comment_non_blocking_saving": "Highly experimental feature, enable only at your own risk of losing your saves. On UNIX systems, server will fork itself to create an autosave. Autosaving on connected Windows clients will be disabled regardless of autosave_only_on_server option.", 279 | "non_blocking_saving": false, 280 | 281 | "_comment_segment_sizes": "Long network messages are split into segments that are sent over multiple ticks. Their size depends on the number of peers currently connected. Increasing the segment size will increase upload bandwidth requirement for the server and download bandwidth requirement for clients. This setting only affects server outbound messages. 
Changing these settings can have a negative impact on connection stability for some clients.", 282 | "minimum_segment_size": 25, 283 | "minimum_segment_size_peer_count": 20, 284 | "maximum_segment_size": 100, 285 | "maximum_segment_size_peer_count": 10 286 | } 287 | server-adminlist.json: '["you", "friend"]' 288 | server-banlist.json: '["you", "friend"]' 289 | --- 290 | apiVersion: v1 291 | kind: ConfigMap 292 | metadata: 293 | name: factorio-test 294 | namespace: default 295 | data: 296 | server-whitelist.json: '["me", "friend"]' 297 | map-gen-settings.json: > 298 | { 299 | "_terrain_segmentation_comment": "The inverse of 'water scale' in the map generator GUI.", 300 | "terrain_segmentation": 1, 301 | 302 | "_water_comment": 303 | [ 304 | "The equivalent to 'water coverage' in the map generator GUI. Higher coverage means more water in larger oceans.", 305 | "Water level = 10 * log2(this value)" 306 | ], 307 | "water": 1, 308 | 309 | "_comment_width+height": "Width and height of map, in tiles; 0 means infinite", 310 | "width": 0, 311 | "height": 0, 312 | 313 | "_starting_area_comment": "Multiplier for 'biter free zone radius'", 314 | "starting_area": 1, 315 | 316 | "peaceful_mode": false, 317 | "autoplace_controls": 318 | { 319 | "coal": {"frequency": 1, "size": 1, "richness": 1}, 320 | "stone": {"frequency": 1, "size": 1, "richness": 1}, 321 | "copper-ore": {"frequency": 1, "size": 1,"richness": 1}, 322 | "iron-ore": {"frequency": 1, "size": 1, "richness": 1}, 323 | "uranium-ore": {"frequency": 1, "size": 1, "richness": 1}, 324 | "crude-oil": {"frequency": 1, "size": 1, "richness": 1}, 325 | "trees": {"frequency": 1, "size": 1, "richness": 1}, 326 | "enemy-base": {"frequency": 1, "size": 1, "richness": 1} 327 | }, 328 | 329 | "cliff_settings": 330 | { 331 | "_name_comment": "Name of the cliff prototype", 332 | "name": "cliff", 333 | 334 | "_cliff_elevation_0_comment": "Elevation of first row of cliffs", 335 | "cliff_elevation_0": 10, 336 | 337 | 
"_cliff_elevation_interval_comment": 338 | [ 339 | "Elevation difference between successive rows of cliffs.", 340 | "This is inversely proportional to 'frequency' in the map generation GUI. Specifically, when set from the GUI the value is 40 / frequency." 341 | ], 342 | "cliff_elevation_interval": 40, 343 | 344 | "_richness_comment": "Called 'cliff continuity' in the map generator GUI. 0 will result in no cliffs, 10 will make all cliff rows completely solid", 345 | "richness": 1 346 | }, 347 | 348 | "_property_expression_names_comment": 349 | [ 350 | "Overrides for property value generators (map type)", 351 | "Leave 'elevation' blank to get 'normal' terrain.", 352 | "Use 'elevation': '0_16-elevation' to reproduce terrain from 0.16.", 353 | "Use 'elevation': '0_17-island' to get an island.", 354 | "Moisture and terrain type are also controlled via this.", 355 | "'control-setting:moisture:frequency:multiplier' is the inverse of the 'moisture scale' in the map generator GUI.", 356 | "'control-setting:moisture:bias' is the 'moisture bias' in the map generator GUI.", 357 | "'control-setting:aux:frequency:multiplier' is the inverse of the 'terrain type scale' in the map generator GUI.", 358 | "'control-setting:aux:bias' is the 'terrain type bias' in the map generator GUI." 
359 | ], 360 | "property_expression_names": 361 | { 362 | "control-setting:moisture:frequency:multiplier": "1", 363 | "control-setting:moisture:bias": "0", 364 | "control-setting:aux:frequency:multiplier": "1", 365 | "control-setting:aux:bias": "0" 366 | }, 367 | 368 | "starting_points": 369 | [ 370 | { "x": 0, "y": 0} 371 | ], 372 | 373 | "_seed_comment": "Use null for a random seed, number for a specific seed.", 374 | "seed": null 375 | } 376 | map-settings.json: > 377 | { 378 | "difficulty_settings": 379 | { 380 | "recipe_difficulty": 0, 381 | "technology_difficulty": 0, 382 | "technology_price_multiplier": 1, 383 | "research_queue_setting": "after-victory" 384 | }, 385 | "pollution": 386 | { 387 | "enabled": true, 388 | "_comment_min_to_diffuse_1": "these are values for 60 ticks (1 simulated second)", 389 | "_comment_min_to_diffuse_2": "amount that is diffused to neighboring chunk", 390 | "diffusion_ratio": 0.02, 391 | "min_to_diffuse": 15, 392 | "ageing": 1, 393 | "expected_max_per_chunk": 150, 394 | "min_to_show_per_chunk": 50, 395 | "min_pollution_to_damage_trees": 60, 396 | "pollution_with_max_forest_damage": 150, 397 | "pollution_per_tree_damage": 50, 398 | "pollution_restored_per_tree_damage": 10, 399 | "max_pollution_to_restore_trees": 20, 400 | "enemy_attack_pollution_consumption_modifier": 1 401 | }, 402 | "enemy_evolution": 403 | { 404 | "enabled": true, 405 | "time_factor": 0.000004, 406 | "destroy_factor": 0.002, 407 | "pollution_factor": 0.0000009 408 | }, 409 | "enemy_expansion": 410 | { 411 | "enabled": true, 412 | "min_base_spacing": 3, 413 | "max_expansion_distance": 7, 414 | "friendly_base_influence_radius": 2, 415 | "enemy_building_influence_radius": 2, 416 | "building_coefficient": 0.1, 417 | "other_base_coefficient": 2.0, 418 | "neighbouring_chunk_coefficient": 0.5, 419 | "neighbouring_base_chunk_coefficient": 0.4, 420 | "max_colliding_tiles_coefficient": 0.9, 421 | "settler_group_min_size": 5, 422 | "settler_group_max_size": 20, 423 | 
"min_expansion_cooldown": 14400, 424 | "max_expansion_cooldown": 216000 425 | }, 426 | "unit_group": 427 | { 428 | "min_group_gathering_time": 3600, 429 | "max_group_gathering_time": 36000, 430 | "max_wait_time_for_late_members": 7200, 431 | "max_group_radius": 30.0, 432 | "min_group_radius": 5.0, 433 | "max_member_speedup_when_behind": 1.4, 434 | "max_member_slowdown_when_ahead": 0.6, 435 | "max_group_slowdown_factor": 0.3, 436 | "max_group_member_fallback_factor": 3, 437 | "member_disown_distance": 10, 438 | "tick_tolerance_when_member_arrives": 60, 439 | "max_gathering_unit_groups": 30, 440 | "max_unit_group_size": 200 441 | }, 442 | "steering": 443 | { 444 | "default": 445 | { 446 | "radius": 1.2, 447 | "separation_force": 0.005, 448 | "separation_factor": 1.2, 449 | "force_unit_fuzzy_goto_behavior": false 450 | }, 451 | "moving": 452 | { 453 | "radius": 3, 454 | "separation_force": 0.01, 455 | "separation_factor": 3, 456 | "force_unit_fuzzy_goto_behavior": false 457 | } 458 | }, 459 | "path_finder": 460 | { 461 | "fwd2bwd_ratio": 5, 462 | "goal_pressure_ratio": 2, 463 | "max_steps_worked_per_tick": 100, 464 | "max_work_done_per_tick": 8000, 465 | "use_path_cache": true, 466 | "short_cache_size": 5, 467 | "long_cache_size": 25, 468 | "short_cache_min_cacheable_distance": 10, 469 | "short_cache_min_algo_steps_to_cache": 50, 470 | "long_cache_min_cacheable_distance": 30, 471 | "cache_max_connect_to_cache_steps_multiplier": 100, 472 | "cache_accept_path_start_distance_ratio": 0.2, 473 | "cache_accept_path_end_distance_ratio": 0.15, 474 | "negative_cache_accept_path_start_distance_ratio": 0.3, 475 | "negative_cache_accept_path_end_distance_ratio": 0.3, 476 | "cache_path_start_distance_rating_multiplier": 10, 477 | "cache_path_end_distance_rating_multiplier": 20, 478 | "stale_enemy_with_same_destination_collision_penalty": 30, 479 | "ignore_moving_enemy_collision_distance": 5, 480 | "enemy_with_different_destination_collision_penalty": 30, 481 | 
"general_entity_collision_penalty": 10, 482 | "general_entity_subsequent_collision_penalty": 3, 483 | "extended_collision_penalty": 3, 484 | "max_clients_to_accept_any_new_request": 10, 485 | "max_clients_to_accept_short_new_request": 100, 486 | "direct_distance_to_consider_short_request": 100, 487 | "short_request_max_steps": 1000, 488 | "short_request_ratio": 0.5, 489 | "min_steps_to_check_path_find_termination": 2000, 490 | "start_to_goal_cost_multiplier_to_terminate_path_find": 500.0, 491 | "overload_levels": [0, 100, 500], 492 | "overload_multipliers": [2, 3, 4], 493 | "negative_path_cache_delay_interval": 20 494 | }, 495 | "max_failed_behavior_count": 3 496 | } 497 | server-settings.json: > 498 | { 499 | "name": "Name of the game as it will appear in the game listing", 500 | "description": "Description of the game that will appear in the listing", 501 | "tags": ["game", "tags"], 502 | 503 | "_comment_max_players": "Maximum number of players allowed, admins can join even a full server. 0 means unlimited.", 504 | "max_players": 0, 505 | 506 | "_comment_visibility": ["public: Game will be published on the official Factorio matching server", 507 | "lan: Game will be broadcast on LAN"], 508 | "visibility": 509 | { 510 | "public": true, 511 | "lan": true 512 | }, 513 | 514 | "_comment_credentials": "Your factorio.com login credentials. Required for games with visibility public", 515 | "username": "", 516 | "password": "", 517 | 518 | "_comment_token": "Authentication token. May be used instead of 'password' above.", 519 | "token": "", 520 | 521 | "game_password": "", 522 | 523 | "_comment_require_user_verification": "When set to true, the server will only allow clients that have a valid Factorio.com account", 524 | "require_user_verification": true, 525 | 526 | "_comment_max_upload_in_kilobytes_per_second" : "optional, default value is 0. 
0 means unlimited.", 527 | "max_upload_in_kilobytes_per_second": 0, 528 | 529 | "_comment_max_upload_slots" : "optional, default value is 5. 0 means unlimited.", 530 | "max_upload_slots": 5, 531 | 532 | "_comment_minimum_latency_in_ticks": "optional one tick is 16ms in default speed, default value is 0. 0 means no minimum.", 533 | "minimum_latency_in_ticks": 0, 534 | 535 | "_comment_ignore_player_limit_for_returning_players": "Players that played on this map already can join even when the max player limit was reached.", 536 | "ignore_player_limit_for_returning_players": false, 537 | 538 | "_comment_allow_commands": "possible values are, true, false and admins-only", 539 | "allow_commands": "admins-only", 540 | 541 | "_comment_autosave_interval": "Autosave interval in minutes", 542 | "autosave_interval": 10, 543 | 544 | "_comment_autosave_slots": "server autosave slots, it is cycled through when the server autosaves.", 545 | "autosave_slots": 5, 546 | 547 | "_comment_afk_autokick_interval": "How many minutes until someone is kicked when doing nothing, 0 for never.", 548 | "afk_autokick_interval": 0, 549 | 550 | "_comment_auto_pause": "Whether should the server be paused when no players are present.", 551 | "auto_pause": true, 552 | 553 | "only_admins_can_pause_the_game": true, 554 | 555 | "_comment_autosave_only_on_server": "Whether autosaves should be saved only on server or also on all connected clients. Default is true.", 556 | "autosave_only_on_server": true, 557 | 558 | "_comment_non_blocking_saving": "Highly experimental feature, enable only at your own risk of losing your saves. On UNIX systems, server will fork itself to create an autosave. Autosaving on connected Windows clients will be disabled regardless of autosave_only_on_server option.", 559 | "non_blocking_saving": false, 560 | 561 | "_comment_segment_sizes": "Long network messages are split into segments that are sent over multiple ticks. Their size depends on the number of peers currently connected. 
Increasing the segment size will increase upload bandwidth requirement for the server and download bandwidth requirement for clients. This setting only affects server outbound messages. Changing these settings can have a negative impact on connection stability for some clients.", 562 | "minimum_segment_size": 25, 563 | "minimum_segment_size_peer_count": 20, 564 | "maximum_segment_size": 100, 565 | "maximum_segment_size_peer_count": 10 566 | } 567 | server-adminlist.json: '["me", "friend"]' 568 | server-banlist.json: '["me", "friend"]' 569 | --- 570 | apiVersion: gameserver.martinheinz.dev/v1alpha1 571 | kind: Server 572 | metadata: 573 | name: factorio 574 | namespace: default 575 | spec: 576 | gameName: Factorio 577 | config: 578 | from: 579 | - configMapRef: 580 | name: factorio 581 | - secretRef: 582 | name: factorio 583 | mountAs: File 584 | mountPath: "/factorio/config" 585 | resources: 586 | limits: 587 | cpu: "2" 588 | memory: 1Gi 589 | requests: 590 | cpu: 250m 591 | memory: 64Mi 592 | storage: 593 | size: 12Gi 594 | -------------------------------------------------------------------------------- /config/samples/gameserver_v1alpha1_server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gameserver.martinheinz.dev/v1alpha1 2 | kind: Server 3 | metadata: 4 | name: csgo 5 | spec: 6 | gameName: "CSGO" 7 | port: # These are defaults, can be omitted 8 | - port: 27015 9 | targetPort: 27015 10 | nodePort: 30015 11 | envFrom: 12 | mountAs: Env 13 | configSource: 14 | - configMapRef: 15 | name: csgo 16 | - secretRef: 17 | name: csgo 18 | storage: 19 | size: 12Gi 20 | resources: 21 | requests: 22 | memory: "64Mi" 23 | cpu: "250m" 24 | limits: 25 | memory: "1Gi" 26 | cpu: "2" 27 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples you want in your 
CSV to this file as resources ## 2 | resources: 3 | - gameserver_v1alpha1_server.yaml 4 | # +kubebuilder:scaffold:manifestskustomizesamples 5 | -------------------------------------------------------------------------------- /config/samples/minecraft.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: minecraft 6 | namespace: default 7 | data: 8 | RCON_PASSWORD: UkNPTl9QQVNTV09SRA== 9 | type: Opaque 10 | --- 11 | apiVersion: v1 12 | kind: ConfigMap 13 | metadata: 14 | name: minecraft 15 | namespace: default 16 | data: 17 | EULA: "TRUE" 18 | ENABLE_RCON: "true" 19 | RCON_PORT: "28016" 20 | SERVER_NAME: "KubernetesOperatorMinecraft" 21 | # OVERRIDE_SERVER_PROPERTIES: "true" 22 | --- 23 | apiVersion: gameserver.martinheinz.dev/v1alpha1 24 | kind: Server 25 | metadata: 26 | name: minecraft 27 | namespace: default 28 | spec: 29 | gameName: Minecraft 30 | config: 31 | from: 32 | - configMapRef: 33 | name: minecraft 34 | - secretRef: 35 | name: minecraft 36 | resources: 37 | limits: 38 | cpu: "2" 39 | memory: 1Gi 40 | requests: 41 | cpu: 250m 42 | memory: 64Mi 43 | storage: 44 | size: 12Gi -------------------------------------------------------------------------------- /config/samples/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: csgo 5 | labels: 6 | # insert any desired labels to identify your claim 7 | app: csgo 8 | spec: 9 | storageClassName: standard 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | # The amount of the volume's storage to request 15 | storage: 10Gi -------------------------------------------------------------------------------- /config/samples/rust.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: rust 5 | 
namespace: default 6 | data: 7 | RUST_RCON_PASSWORD: UlVTVF9SQ09OX1BBU1NXT1JE 8 | type: Opaque 9 | --- 10 | apiVersion: v1 11 | kind: ConfigMap 12 | metadata: 13 | name: rust 14 | namespace: default 15 | data: 16 | RUST_SERVER_IDENTITY: "rust_game_server" 17 | RUST_SERVER_NAME: "My Awesome Server" 18 | RUST_SERVER_DESCRIPTION: "Test Rust server using Kubernetes game-server-operator" 19 | RUST_SERVER_SEED: "98765" 20 | RUST_SERVER_STARTUP_ARGUMENTS: "-batchmode -load -logfile /dev/stdout +server.secure 1" 21 | RUST_SERVER_WORLDSIZE: "500" # To make start-up faster during testing 22 | --- 23 | apiVersion: gameserver.martinheinz.dev/v1alpha1 24 | kind: Server 25 | metadata: 26 | name: rust 27 | namespace: default 28 | spec: 29 | gameName: Rust 30 | config: 31 | from: 32 | - configMapRef: 33 | name: rust 34 | - secretRef: 35 | name: rust 36 | resources: 37 | limits: 38 | cpu: "2" 39 | memory: 1Gi 40 | requests: 41 | cpu: 250m 42 | memory: 64Mi 43 | storage: 44 | size: 12Gi 45 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | patchesJson6902: 4 | - path: patches/basic.config.yaml 5 | target: 6 | group: scorecard.operatorframework.io 7 | version: v1alpha3 8 | kind: Configuration 9 | name: config 10 | - path: patches/olm.config.yaml 11 | target: 12 | group: scorecard.operatorframework.io 13 | version: v1alpha3 14 | kind: Configuration 15 | name: config 16 | # +kubebuilder:scaffold:patchesJson6902 17 | 
-------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.3.0 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - olm-bundle-validation 7 | image: quay.io/operator-framework/scorecard-test:v1.3.0 8 | labels: 9 | suite: olm 10 | test: olm-bundle-validation-test 11 | - op: add 12 | path: /stages/0/tests/- 13 | value: 14 | entrypoint: 15 | - scorecard-test 16 | - olm-crds-have-validation 17 | image: quay.io/operator-framework/scorecard-test:v1.3.0 18 | labels: 19 | suite: olm 20 | test: olm-crds-have-validation-test 21 | - op: add 22 | path: /stages/0/tests/- 23 | value: 24 | entrypoint: 25 | - scorecard-test 26 | - olm-crds-have-resources 27 | image: quay.io/operator-framework/scorecard-test:v1.3.0 28 | labels: 29 | suite: olm 30 | test: olm-crds-have-resources-test 31 | - op: add 32 | path: /stages/0/tests/- 33 | value: 34 | entrypoint: 35 | - scorecard-test 36 | - olm-spec-descriptors 37 | image: quay.io/operator-framework/scorecard-test:v1.3.0 38 | labels: 39 | suite: olm 40 | test: olm-spec-descriptors-test 41 | - op: add 42 | path: /stages/0/tests/- 43 | value: 44 | entrypoint: 45 | - scorecard-test 46 | - olm-status-descriptors 47 | image: quay.io/operator-framework/scorecard-test:v1.3.0 48 | labels: 49 | suite: olm 50 | test: olm-status-descriptors-test 51 | -------------------------------------------------------------------------------- 
/controllers/server_controller.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2021 Martin Heinz.
3 | */
4 | 
5 | package controllers
6 | 
7 | import (
8 | 	"context"
9 | 	"fmt"
10 | 	"reflect"
11 | 	"strings"
12 | 
13 | 	"github.com/go-logr/logr"
14 | 	appsv1 "k8s.io/api/apps/v1"
15 | 	corev1 "k8s.io/api/core/v1"
16 | 	"k8s.io/apimachinery/pkg/api/errors"
17 | 	"k8s.io/apimachinery/pkg/api/resource"
18 | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
19 | 	"k8s.io/apimachinery/pkg/runtime"
20 | 	"k8s.io/apimachinery/pkg/types"
21 | 	ctrl "sigs.k8s.io/controller-runtime"
22 | 	"sigs.k8s.io/controller-runtime/pkg/client"
23 | 
24 | 	gameserverv1alpha1 "github.com/MartinHeinz/game-server-operator/api/v1alpha1"
25 | )
26 | 
27 | // ServerReconciler reconciles a Server object
28 | type ServerReconciler struct {
29 | 	client.Client
30 | 	Log logr.Logger
31 | 	Scheme *runtime.Scheme
32 | }
33 | 
    // Name suffixes appended to a Server's name to derive its child objects' names.
    // These must stay in sync with the "-" + lowercased-type-name suffixes that Reconcile
    // recomputes via reflection (see lines 112 and 154).
34 | var (
35 | 	depSuffix = "-deployment"
36 | 	svcSuffix = "-service"
37 | 	pvcSuffix = "-persistentvolumeclaim"
38 | )
39 | 
40 | // +kubebuilder:rbac:groups=gameserver.martinheinz.dev,resources=servers,verbs=get;list;watch;create;update;patch;delete
41 | // +kubebuilder:rbac:groups=gameserver.martinheinz.dev,resources=servers/status,verbs=get;update;patch
42 | // +kubebuilder:rbac:groups=gameserver.martinheinz.dev,resources=servers/finalizers,verbs=update
    // NOTE(review): the next marker grants "services" under the apps group (Services are in core,
    // already covered two lines below), and the line after it repeats the apps/deployments rule —
    // one of these two markers is redundant and the "services" entry in the apps group is wrong.
43 | // +kubebuilder:rbac:groups=apps,resources=deployments;services,verbs=get;list;watch;create;update;patch;delete
44 | // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
45 | // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
46 | // +kubebuilder:rbac:groups=core,resources=services;persistentvolumeclaims,verbs=get;list;watch;create;update;patch;delete
47 | 
    // Reconcile drives a Server toward its desired state: it refreshes the Server's status from its
    // child Deployment/Service/PVC, creates any missing child from the game template (requeueing
    // after each create), and for Servers past generation 1 pushes spec changes into the children.
48 | // For more details, check Reconcile and its Result here:
49 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile
50 | func (r *ServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
51 | 	log := r.Log.WithValues("server", req.NamespacedName)
52 | 
53 | 	server := &gameserverv1alpha1.Server{}
54 | 	if err := r.Get(ctx, req.NamespacedName, server); err != nil {
55 | 		log.Error(err, "unable to fetch Server")
            // Not-found is expected after deletion; any other error is returned so the request retries.
56 | 		return ctrl.Result{}, client.IgnoreNotFound(err)
57 | 	}
58 | 
59 | 	var childDep appsv1.Deployment
60 | 	server.Status.Status = gameserverv1alpha1.Inactive
    // NOTE(review): BUG — if this Get fails with an error other than NotFound, the first condition
    // is false and the else-if below dereferences childDep.Spec.Replicas on a zero-valued
    // Deployment: nil-pointer panic. The same panic occurs for an existing Deployment whose
    // .Spec.Replicas is nil. A nil guard is needed before the dereference.
61 | 	if err := r.Get(ctx, types.NamespacedName{Name: server.Name + depSuffix, Namespace: server.Namespace}, &childDep); err != nil && errors.IsNotFound(err) {
62 | 		log.Info("Child Deployment not available for status update")
63 | 	} else if *childDep.Spec.Replicas > int32(0) {
64 | 		server.Status.Status = gameserverv1alpha1.Active
65 | 	}
66 | 
67 | 	var childPvc corev1.PersistentVolumeClaim
68 | 	server.Status.Storage = gameserverv1alpha1.Pending
69 | 	if err := r.Get(ctx, types.NamespacedName{Name: server.Name + pvcSuffix, Namespace: server.Namespace}, &childPvc); err != nil && errors.IsNotFound(err) {
70 | 		log.Info("Child PersistentVolumeClaim not available for status update")
71 | 	} else if childPvc.Status.Phase == corev1.ClaimBound {
72 | 		server.Status.Storage = gameserverv1alpha1.Bound
73 | 	}
74 | 
75 | 	var childService corev1.Service
76 | 	server.Status.Ports = []int32{}
77 | 	if err := r.Get(ctx, types.NamespacedName{Name: server.Name + svcSuffix, Namespace: server.Namespace}, &childService); err != nil && errors.IsNotFound(err) {
78 | 		log.Info("Child Service not available for status update")
79 | 	} else {
            // Surface the externally reachable NodePorts in the Server status.
80 | 		for _, port := range childService.Spec.Ports {
81 | 			server.Status.Ports = append(server.Status.Ports, port.NodePort)
82 | 		}
83 | 	}
84 | 
85 | 	if err := r.Status().Update(ctx, server); err != nil {
86 | 		log.Error(err, "unable to update Server status")
87 | 		return ctrl.Result{}, err
88 | 	}
89 | 
90 | 	gameName := server.Spec.GameName
91 | 
92 | 	var gameSettings gameserverv1alpha1.GameSetting
93 | 
    // NOTE(review): if gameName matches no entry in Games, gameSettings stays zero-valued and the
    // *ForServer constructors below will dereference its nil Deployment/Service/PVC templates —
    // presumably the admission webhook rejects unknown game names before this point; TODO confirm.
94 | 	for name, game := range gameserverv1alpha1.Games {
95 | 		if name == gameName {
96 | 			gameSettings = game
97 | 			break
98 | 		}
99 | 	}
100 | 
101 | 	found := []client.Object{
102 | 		&appsv1.Deployment{},
103 | 		&corev1.Service{},
104 | 		&corev1.PersistentVolumeClaim{},
105 | 	}
106 | 
107 | 	// --------------------------
108 | 	// Initialize
109 | 
    // Create pass: for each child kind, derive the expected object name
    // ("<server>-<lowercased type name>", matching the *Suffix vars above) and create the object
    // from the game template when it is missing. Each create returns early with a requeue.
110 | 	for _, f := range found {
111 | 		t := reflect.TypeOf(f).String()
112 | 		suffix := "-" + strings.ToLower(strings.Split(reflect.TypeOf(f).String(), ".")[1])
113 | 		objectName := server.Name + suffix
114 | 		if err := r.Get(ctx, types.NamespacedName{Name: objectName, Namespace: server.Namespace}, f); err != nil && errors.IsNotFound(err) {
115 | 			// Define a new Object
116 | 			obj := client.Object(nil)
117 | 
            // NOTE(review): the default case is unreachable for the fixed `found` slice above, but if
            // it ever fires, obj stays nil and obj.GetNamespace() below panics instead of logging.
118 | 			switch f.(type) {
119 | 			default:
120 | 				log.Info("Invalid Kind")
121 | 			case *appsv1.Deployment:
122 | 				obj = r.deploymentForServer(server, &gameSettings)
123 | 			case *corev1.Service:
124 | 				obj = r.serviceForServer(server, &gameSettings)
125 | 			case *corev1.PersistentVolumeClaim:
126 | 				// TODO To preserve PVC after Server deletion - delete PVC OwnersReference - Use Mutating Admission Webhook - https://book.kubebuilder.io/reference/webhook-for-core-types.html
127 | 				obj = r.persistentVolumeClaimForServer(server, &gameSettings)
128 | 			}
129 | 			log.Info(fmt.Sprintf("Creating a new %s", t), fmt.Sprintf("%s.Namespace", t), obj.GetNamespace(), fmt.Sprintf("%s.Name", t), obj.GetName())
130 | 			err = r.Create(ctx, obj)
131 | 			if err != nil {
132 | 				log.Error(err, fmt.Sprintf("Failed to create new %s", t), fmt.Sprintf("%s.Namespace", t), obj.GetNamespace(), fmt.Sprintf("%s.Name", t), obj.GetName())
133 | 				return ctrl.Result{}, err
134 | 			}
135 | 			// Object created successfully - return and requeue
136 | 			return ctrl.Result{Requeue: true}, nil
137 | 		} else if err != nil {
138 | 			log.Error(err, fmt.Sprintf("Failed to get %s", t))
139 | 			return ctrl.Result{}, err
140 | 		}
141 | 	}
142 | 	// --------------------------
143 | 
    // Generation 1 means the spec has never been updated since creation, so there is nothing to
    // push down into the children yet.
144 | 	currentGen := server.Generation
145 | 	if currentGen == 1 {
146 | 		return ctrl.Result{}, nil
147 | 	}
148 | 
149 | 	// --------------------------
150 | 	// Update
    // Update pass: mirror spec changes into the existing children.
    // NOTE(review): requeue is overwritten on each iteration, so only the last updated kind decides
    // whether to requeue (PVCs `continue`, so the Service result currently wins) — TODO confirm intended.
151 | 	requeue := false
152 | 	for _, f := range found {
153 | 		t := reflect.TypeOf(f).String()
154 | 		suffix := "-" + strings.ToLower(strings.Split(reflect.TypeOf(f).String(), ".")[1])
155 | 		objectName := server.Name + suffix
156 | 		if err := r.Get(ctx, types.NamespacedName{Name: objectName, Namespace: server.Namespace}, f); err == nil {
157 | 			// Define a new Object
158 | 			obj := client.Object(nil)
159 | 
160 | 			switch f.(type) {
161 | 			default:
162 | 				log.Info("Invalid Kind")
163 | 			case *appsv1.Deployment:
164 | 				obj, requeue = r.updateDeploymentForServer(server, f.(*appsv1.Deployment))
165 | 			case *corev1.Service:
166 | 				obj, requeue = r.updateServiceForServer(server, f.(*corev1.Service))
167 | 			case *corev1.PersistentVolumeClaim:
168 | 				continue // TODO
169 | 				//obj, requeue = r.updatePersistentVolumeClaimForServer(server, &gameSettings)
170 | 			}
171 | 			log.Info(fmt.Sprintf("Updating a %s", t), fmt.Sprintf("%s.Namespace", t), obj.GetNamespace(), fmt.Sprintf("%s.Name", t), obj.GetName())
172 | 
173 | 			if err = r.Update(ctx, obj); err != nil {
174 | 				log.Error(err, fmt.Sprintf("Failed to update %s", t), fmt.Sprintf("%s.Namespace", t), obj.GetNamespace(), fmt.Sprintf("%s.Name", t), obj.GetName())
175 | 				return ctrl.Result{}, err
176 | 			}
177 | 			// Object updated successfully - return and requeue
178 | 		} else if err != nil {
179 | 			log.Error(err, fmt.Sprintf("Failed to get %s", t))
180 | 			return ctrl.Result{}, err
181 | 		}
182 | 	}
183 | 
184 | 	if requeue {
185 | 		return ctrl.Result{Requeue: true}, nil
186 | 	}
187 | 
188 | 	return ctrl.Result{}, nil
189 | }
190 | 
    // deploymentForServer builds the child Deployment for Server m from the game template gs:
    // it stamps name/namespace/labels, points the data volume at the Server's PVC, wires the
    // ConfigMaps/Secrets from m.Spec.Config in either as a projected file volume or as EnvFrom
    // sources, and applies the Server's resource requirements. A controller owner reference is set
    // so the Deployment is garbage-collected with the Server.
191 | func (r *ServerReconciler) deploymentForServer(m *gameserverv1alpha1.Server, gs *gameserverv1alpha1.GameSetting) *appsv1.Deployment {
192 | 	ls := labelsForServer(m.Name)
193 | 
194 | 	dep := &appsv1.Deployment{}
195 | 	gs.Deployment.DeepCopyInto(dep)
196 | 
197 | 	dep.ObjectMeta = metav1.ObjectMeta{
198 | 		Name: m.Name +
depSuffix,
199 | 		Namespace: m.Namespace,
200 | 		Labels: ls,
201 | 	}
202 | 	dep.Spec.Selector = &metav1.LabelSelector{
203 | 		MatchLabels: ls,
204 | 	}
205 | 	dep.Spec.Template.Labels = ls
    // Point the template's first volume (the game data volume) at this Server's PVC.
    // NOTE(review): assumes every game template declares that volume at index 0 — TODO confirm.
206 | 	dep.Spec.Template.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ClaimName = m.Name + pvcSuffix
207 | 
208 | 	if m.Spec.Config.MountAs == gameserverv1alpha1.File {
209 | 
210 | 		// Setup `volumes` block of spec
211 | 		volume := corev1.Volume{
212 | 			Name: m.Name + "-config",
213 | 			VolumeSource: corev1.VolumeSource{
214 | 				Projected: &corev1.ProjectedVolumeSource{
                    // 0777 makes the projected files world-readable/executable — intentional for
                    // game servers that exec their config scripts, presumably; TODO confirm.
215 | 					DefaultMode: func(val int32) *int32 { return &val }(0777),
216 | 				},
217 | 			},
218 | 		}
        // Project each referenced ConfigMap/Secret into the single config volume.
219 | 		for _, res := range m.Spec.Config.From {
220 | 			projection := corev1.VolumeProjection{}
221 | 			if res.ConfigMapRef != nil {
222 | 				projection.ConfigMap = &corev1.ConfigMapProjection{
223 | 					LocalObjectReference: corev1.LocalObjectReference{Name: res.ConfigMapRef.Name},
224 | 				}
225 | 			} else if res.SecretRef != nil {
226 | 				projection.Secret = &corev1.SecretProjection{
227 | 					LocalObjectReference: corev1.LocalObjectReference{Name: res.SecretRef.Name},
228 | 				}
229 | 			}
230 | 			volume.VolumeSource.Projected.Sources = append(volume.VolumeSource.Projected.Sources, projection)
231 | 
232 | 		}
233 | 		dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, volume)
234 | 
235 | 		// Setup `volumeMounts` block of containers
236 | 		// If mounted game config is present, then replace it
237 | 		replaced := false
238 | 		for i, volumeMount := range dep.Spec.Template.Spec.Containers[0].VolumeMounts {
239 | 			if volumeMount.Name == m.Name+"-config" {
                // NOTE(review): this create path mounts the config with ReadOnly: false while the
                // update path (updateDeploymentForServer) uses ReadOnly: true — the two reconcile
                // paths disagree and will flip the mount back and forth; pick one.
240 | 				dep.Spec.Template.Spec.Containers[0].VolumeMounts[i] = corev1.VolumeMount{
241 | 					Name: m.Name + "-config", // Must be same as in `volume` var above
242 | 					ReadOnly: false,
243 | 					MountPath: m.Spec.Config.MountPath,
244 | 				}
245 | 				replaced = true
246 | 			}
247 | 		}
248 | 
249 | 		// If mounted game config is not present, append it
250 | 		if !replaced {
251 | 			dep.Spec.Template.Spec.Containers[0].VolumeMounts = append(dep.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
252 | 				Name: m.Name + "-config", // Must be same as in `volume` var above
253 | 				ReadOnly: false,
254 | 				MountPath: m.Spec.Config.MountPath,
255 | 			})
256 | 		}
257 | 
258 | 	} else {
        // Env mount: replace the template's EnvFrom with exactly the Server's ConfigMap/Secret refs.
259 | 		dep.Spec.Template.Spec.Containers[0].EnvFrom = nil
260 | 		for i, res := range m.Spec.Config.From {
261 | 			dep.Spec.Template.Spec.Containers[0].EnvFrom = append(dep.Spec.Template.Spec.Containers[0].EnvFrom, corev1.EnvFromSource{})
262 | 			if res.ConfigMapRef != nil {
263 | 				dep.Spec.Template.Spec.Containers[0].EnvFrom[i].ConfigMapRef = res.ConfigMapRef
264 | 			} else if res.SecretRef != nil {
265 | 				dep.Spec.Template.Spec.Containers[0].EnvFrom[i].SecretRef = res.SecretRef
266 | 			}
267 | 		}
268 | 	}
269 | 
270 | 	if m.Spec.ResourceRequirements != nil {
271 | 		dep.Spec.Template.Spec.Containers[0].Resources = *m.Spec.ResourceRequirements
272 | 	}
273 | 
    // NOTE(review): SetControllerReference's error return is ignored here (and in the other
    // *ForServer helpers); it fails e.g. when another controller owner is already set.
274 | 	ctrl.SetControllerReference(m, dep, r.Scheme)
275 | 	return dep
276 | }
277 | 
    // updateDeploymentForServer mirrors spec changes from Server m into the existing child
    // Deployment and reports whether the caller should requeue.
278 | func (r *ServerReconciler) updateDeploymentForServer(m *gameserverv1alpha1.Server, dep *appsv1.Deployment) (*appsv1.Deployment, bool) {
279 | 	existingConfig := dep.Spec.Template.Spec.Containers[0].EnvFrom
280 | 	existingResources := dep.Spec.Template.Spec.Containers[0].Resources
281 | 	requeue := false
282 | 
283 | 	// If ConfigMap/Secret were changed
    // NOTE(review): BUG — m.Spec.Config is a Config struct while existingConfig is
    // []corev1.EnvFromSource; reflect.DeepEqual on values of different types is always false, so
    // this branch (and requeue) fires on every reconcile. Compare m.Spec.Config.From instead.
284 | 	if !reflect.DeepEqual(m.Spec.Config, existingConfig) {
285 | 		requeue = true
286 | 		if m.Spec.Config.MountAs == gameserverv1alpha1.File {
287 | 
288 | 			// Remove old projected volume
            // NOTE(review): deleting from the slice being ranged over shifts indices; this is safe
            // only while at most one volume carries the config name.
289 | 			for i, vol := range dep.Spec.Template.Spec.Volumes {
290 | 				if vol.Name == m.Name+"-config" {
291 | 					dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes[:i], dep.Spec.Template.Spec.Volumes[i+1:]...)
292 | 				}
293 | 			}
294 | 
295 | 			// Prepare new projected volume
296 | 			volume := corev1.Volume{
297 | 				Name: m.Name + "-config",
298 | 				VolumeSource: corev1.VolumeSource{
299 | 					Projected: &corev1.ProjectedVolumeSource{
300 | 						DefaultMode: func(val int32) *int32 { return &val }(0777),
301 | 					},
302 | 				},
303 | 			}
304 | 
305 | 			for _, res := range m.Spec.Config.From {
306 | 				projection := corev1.VolumeProjection{}
307 | 				if res.ConfigMapRef != nil {
308 | 					projection.ConfigMap = &corev1.ConfigMapProjection{
309 | 						LocalObjectReference: corev1.LocalObjectReference{Name: res.ConfigMapRef.Name},
310 | 					}
311 | 				} else if res.SecretRef != nil {
312 | 					projection.Secret = &corev1.SecretProjection{
313 | 						LocalObjectReference: corev1.LocalObjectReference{Name: res.SecretRef.Name},
314 | 					}
315 | 				}
316 | 				volume.VolumeSource.Projected.Sources = append(volume.VolumeSource.Projected.Sources, projection)
317 | 			}
318 | 			// Append new projected volume
319 | 			dep.Spec.Template.Spec.Volumes = append(dep.Spec.Template.Spec.Volumes, volume)
320 | 
321 | 			// Remove old volumeMount
322 | 			for i, vol := range dep.Spec.Template.Spec.Containers[0].VolumeMounts {
323 | 				if vol.Name == m.Name+"-config" {
324 | 					dep.Spec.Template.Spec.Containers[0].VolumeMounts = append(
325 | 						dep.Spec.Template.Spec.Containers[0].VolumeMounts[:i],
326 | 						dep.Spec.Template.Spec.Containers[0].VolumeMounts[i+1:]...,
327 | 					)
328 | 				}
329 | 			}
330 | 			// Append to volumeMount block of container
            // NOTE(review): ReadOnly: true here vs ReadOnly: false in the create path
            // (deploymentForServer) — successive reconciles flap the mount; pick one value.
331 | 			dep.Spec.Template.Spec.Containers[0].VolumeMounts = append(dep.Spec.Template.Spec.Containers[0].VolumeMounts, corev1.VolumeMount{
332 | 				Name: m.Name + "-config", // Must be same as in `volume` var above
333 | 				ReadOnly: true,
334 | 				MountPath: m.Spec.Config.MountPath,
335 | 			})
336 | 
337 | 		} else {
338 | 			var newConfig []corev1.EnvFromSource
339 | 			for i, res := range m.Spec.Config.From {
340 | 				newConfig = append(newConfig, corev1.EnvFromSource{})
341 | 				if res.ConfigMapRef != nil {
342 | 					newConfig[i].ConfigMapRef = res.ConfigMapRef
343 | 				} else if res.SecretRef != nil {
344 | 					newConfig[i].SecretRef = res.SecretRef
345 | 				}
346 | 			}
347 | 			dep.Spec.Template.Spec.Containers[0].EnvFrom = newConfig
348 | 		}
349 | 	}
350 | 
351 | 	// If ResourceRequirements were changed
    // NOTE(review): BUG — m.Spec.ResourceRequirements is *corev1.ResourceRequirements while
    // existingResources is a value, so DeepEqual is always false and this always fires; line 354
    // then panics with a nil dereference if the Server spec omits resources. The create path
    // (deploymentForServer, line 270) nil-checks; this path does not.
352 | 	if !reflect.DeepEqual(m.Spec.ResourceRequirements, existingResources) {
353 | 		requeue = true
354 | 		dep.Spec.Template.Spec.Containers[0].Resources = *m.Spec.ResourceRequirements
355 | 	}
356 | 
357 | 	return dep, requeue
358 | }
359 | 
    // serviceForServer builds the child Service from the game template, stamping name/namespace/
    // labels and overriding the template's ports with the Server's spec ports when provided.
360 | func (r *ServerReconciler) serviceForServer(m *gameserverv1alpha1.Server, gs *gameserverv1alpha1.GameSetting) *corev1.Service {
361 | 	ls := labelsForServer(m.Name)
362 | 
363 | 	svc := &corev1.Service{}
364 | 	gs.Service.DeepCopyInto(svc)
365 | 	svc.ObjectMeta = metav1.ObjectMeta{
366 | 		Name: m.Name + svcSuffix,
367 | 		Namespace: m.Namespace,
368 | 		Labels: ls,
369 | 	}
370 | 	svc.Spec.Selector = ls
371 | 
372 | 	if m.Spec.Ports != nil {
373 | 		svc.Spec.Ports = m.Spec.Ports
374 | 	}
375 | 
376 | 	ctrl.SetControllerReference(m, svc, r.Scheme)
377 | 	return svc
378 | }
379 | 
    // updateServiceForServer syncs NodePorts from the Server spec into the existing child Service.
    // NOTE(review): BUG — this indexes m.Spec.Ports[i] while ranging over the live Service's
    // ports, so it panics with index out of range when the spec lists fewer ports than the
    // Service has; it also never adds/removes ports or syncs Port/TargetPort changes.
380 | func (r *ServerReconciler) updateServiceForServer(m *gameserverv1alpha1.Server, svc *corev1.Service) (*corev1.Service, bool) {
381 | 	existingServicePorts := svc.Spec.Ports
382 | 	requeue := false
383 | 
384 | 	for i, port := range existingServicePorts {
385 | 		if port.NodePort != m.Spec.Ports[i].NodePort {
386 | 			requeue = true
387 | 			svc.Spec.Ports[i].NodePort = m.Spec.Ports[i].NodePort
388 | 		}
389 | 	}
390 | 
391 | 	return svc, requeue
392 | }
393 | 
    // persistentVolumeClaimForServer builds the child PVC from the game template, sized from the
    // Server spec. NOTE(review): resource.MustParse panics on an invalid Storage.Size — presumably
    // the admission webhook validates the size first; confirm.
394 | func (r *ServerReconciler) persistentVolumeClaimForServer(m *gameserverv1alpha1.Server, gs *gameserverv1alpha1.GameSetting) *corev1.PersistentVolumeClaim {
395 | 	ls := labelsForServer(m.Name)
396 | 
397 | 	pvc := &corev1.PersistentVolumeClaim{}
398 | 	gs.PersistentVolumeClaim.DeepCopyInto(pvc)
399 | 
400 | 	pvc.ObjectMeta = metav1.ObjectMeta{
401 | 		Name: m.Name + pvcSuffix,
402 | 		Namespace: m.Namespace,
403 | 		Labels: ls,
404 | 	}
405 | 
406 | 	pvc.Spec.Resources.Requests = corev1.ResourceList{
407 | 
corev1.ResourceStorage: resource.MustParse(m.Spec.Storage.Size),
408 | 	}
409 | 
410 | 	ctrl.SetControllerReference(m, pvc, r.Scheme) // TODO if PVC is to be preserved, then do not set Server as owner
411 | 	return pvc
412 | }
413 | 
    // labelsForServer returns the selector labels shared by a Server's child objects.
414 | func labelsForServer(name string) map[string]string {
415 | 	return map[string]string{"server": name}
416 | }
417 | 
418 | // SetupWithManager sets up the controller with the Manager.
    // Watching the three owned child kinds means any change to a child requeues its owning Server.
419 | func (r *ServerReconciler) SetupWithManager(mgr ctrl.Manager) error {
420 | 	return ctrl.NewControllerManagedBy(mgr).
421 | 		For(&gameserverv1alpha1.Server{}).
422 | 		Owns(&appsv1.Deployment{}).
423 | 		Owns(&corev1.Service{}).
424 | 		Owns(&corev1.PersistentVolumeClaim{}).
425 | 		Complete(r)
426 | }
427 | 
--------------------------------------------------------------------------------
/controllers/server_controller_test.go:
--------------------------------------------------------------------------------
1 | package controllers
2 | 
3 | import (
4 | 	"context"
5 | 	appsv1 "k8s.io/api/apps/v1"
6 | 	corev1 "k8s.io/api/core/v1"
7 | 	"k8s.io/apimachinery/pkg/api/resource"
8 | 	"k8s.io/apimachinery/pkg/util/intstr"
9 | 	"time"
10 | 
11 | 	. "github.com/onsi/ginkgo"
12 | 	. "github.com/onsi/gomega"
13 | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14 | 	"k8s.io/apimachinery/pkg/types"
15 | 
16 | 	gameserverv1alpha1 "github.com/MartinHeinz/game-server-operator/api/v1alpha1"
17 | )
18 | 
19 | // Run `go test ./...` in `controllers/` directory
20 | var _ = Describe("Server controller", func() {
21 | 
22 | 	// Define utility constants for object names and testing.
23 | const ( 24 | ServerName = "test-server" 25 | ServerNamespace = "default" 26 | 27 | DeploymentName = ServerName + "-deployment" 28 | ServiceName = ServerName + "-service" 29 | PvcName = ServerName + "-persistentvolumeclaim" 30 | ConfigMapName = "csgo-env-config" 31 | SecretName = "csgo-secret" 32 | GameName = gameserverv1alpha1.CSGO 33 | 34 | timeout = time.Second * 10 35 | interval = time.Millisecond * 250 36 | ) 37 | 38 | var ( 39 | gameSettings gameserverv1alpha1.GameSetting 40 | server = &gameserverv1alpha1.Server{} 41 | resources = &corev1.ResourceRequirements{} 42 | storage = &gameserverv1alpha1.ServerStorage{Size: "2G"} 43 | 44 | serverLookupKey = types.NamespacedName{Name: ServerName, Namespace: ServerNamespace} 45 | deploymentLookupKey = types.NamespacedName{Name: DeploymentName, Namespace: ServerNamespace} 46 | serviceLookupKey = types.NamespacedName{Name: ServiceName, Namespace: ServerNamespace} 47 | pvcLookupKey = types.NamespacedName{Name: PvcName, Namespace: ServerNamespace} 48 | 49 | createdServer = &gameserverv1alpha1.Server{} 50 | ) 51 | 52 | BeforeEach(func() { 53 | for name, game := range gameserverv1alpha1.Games { 54 | if name == GameName { 55 | gameSettings = game 56 | break 57 | } 58 | } 59 | ctx := context.Background() 60 | 61 | // Create ConfigMap and Secret that will be mounted into Server 62 | configMap := &corev1.ConfigMap{ 63 | ObjectMeta: metav1.ObjectMeta{ 64 | Name: ConfigMapName, 65 | Namespace: ServerNamespace, 66 | }, 67 | Data: map[string]string{"SERVER_HOSTNAME": "hostname"}, 68 | } 69 | secret := &corev1.Secret{ 70 | ObjectMeta: metav1.ObjectMeta{ 71 | Name: SecretName, 72 | Namespace: ServerNamespace, 73 | }, 74 | StringData: map[string]string{"SERVER_PASSWORD": "password"}, 75 | } 76 | 77 | Expect(k8sClient.Create(ctx, configMap)).Should(Succeed()) 78 | Expect(k8sClient.Create(ctx, secret)).Should(Succeed()) 79 | 80 | configMapLookupKey := types.NamespacedName{Name: ConfigMapName, Namespace: ServerNamespace} 81 | 
createdConfigMap := &corev1.ConfigMap{} 82 | 83 | secretLookupKey := types.NamespacedName{Name: SecretName, Namespace: ServerNamespace} 84 | createdSecret := &corev1.Secret{} 85 | 86 | Eventually(func() bool { 87 | if err := k8sClient.Get(ctx, configMapLookupKey, createdConfigMap); err != nil { 88 | return false 89 | } 90 | if err := k8sClient.Get(ctx, secretLookupKey, createdSecret); err != nil { 91 | return false 92 | } 93 | return true 94 | }, timeout, interval).Should(BeTrue()) 95 | 96 | // Create Server and check whether it's available 97 | resources = &corev1.ResourceRequirements{ 98 | Requests: corev1.ResourceList{ 99 | corev1.ResourceCPU: resource.MustParse("250m"), 100 | corev1.ResourceMemory: resource.MustParse("64Mi"), 101 | }, 102 | Limits: corev1.ResourceList{ 103 | corev1.ResourceCPU: resource.MustParse("2"), 104 | corev1.ResourceMemory: resource.MustParse("1Gi"), 105 | }, 106 | } 107 | 108 | server = &gameserverv1alpha1.Server{ 109 | TypeMeta: metav1.TypeMeta{ 110 | APIVersion: "servers.gameserver.martinheinz.dev/v1alpha1", 111 | Kind: "Server", 112 | }, 113 | ObjectMeta: metav1.ObjectMeta{ 114 | Name: ServerName, 115 | Namespace: ServerNamespace, 116 | }, 117 | Spec: gameserverv1alpha1.ServerSpec{ 118 | GameName: gameserverv1alpha1.CSGO, 119 | Ports: []corev1.ServicePort{ 120 | {Name: "27015-tcp", Port: 27015, NodePort: 30020, TargetPort: intstr.IntOrString{Type: 0, IntVal: 27015, StrVal: ""}, Protocol: corev1.ProtocolTCP}, 121 | {Name: "27015-udp", Port: 27015, NodePort: 30020, TargetPort: intstr.IntOrString{Type: 0, IntVal: 27015, StrVal: ""}, Protocol: corev1.ProtocolUDP}, 122 | }, 123 | Config: gameserverv1alpha1.Config{ 124 | From: []corev1.EnvFromSource{{ 125 | ConfigMapRef: &corev1.ConfigMapEnvSource{ 126 | LocalObjectReference: corev1.LocalObjectReference{Name: ConfigMapName}, 127 | }, 128 | }, { 129 | SecretRef: &corev1.SecretEnvSource{ 130 | LocalObjectReference: corev1.LocalObjectReference{Name: SecretName}, 131 | }, 132 | }, 133 | }, 134 
| MountAs: "Env", 135 | }, 136 | Storage: storage, 137 | ResourceRequirements: resources, 138 | }, 139 | } 140 | Expect(k8sClient.Create(ctx, server)).Should(Succeed()) 141 | 142 | createdServer = &gameserverv1alpha1.Server{} 143 | 144 | Eventually(func() bool { 145 | if err := k8sClient.Get(ctx, serverLookupKey, createdServer); err != nil { 146 | return false 147 | } 148 | return true 149 | }, timeout, interval).Should(BeTrue()) 150 | 151 | }) 152 | 153 | AfterEach(func() { 154 | ctx := context.Background() 155 | 156 | // Tear down configMap, Secret and Server 157 | configMapLookupKey := types.NamespacedName{Name: ConfigMapName, Namespace: ServerNamespace} 158 | createdConfigMap := &corev1.ConfigMap{} 159 | 160 | secretLookupKey := types.NamespacedName{Name: SecretName, Namespace: ServerNamespace} 161 | createdSecret := &corev1.Secret{} 162 | 163 | Eventually(func() bool { 164 | if err := k8sClient.Get(ctx, configMapLookupKey, createdConfigMap); err != nil { 165 | return false 166 | } 167 | if err := k8sClient.Get(ctx, secretLookupKey, createdSecret); err != nil { 168 | return false 169 | } 170 | return true 171 | }, timeout, interval).Should(BeTrue()) 172 | 173 | Expect(k8sClient.Delete(ctx, createdServer)).Should(Succeed()) 174 | Expect(k8sClient.Delete(ctx, createdConfigMap)).Should(Succeed()) 175 | Expect(k8sClient.Delete(ctx, createdSecret)).Should(Succeed()) 176 | }) 177 | 178 | Describe("creating Server", func() { 179 | Context("When Server is successfully created", func() { 180 | It("Should create objects with game-specific attributes", func() { 181 | ctx := context.Background() 182 | 183 | // Server is created in BeforeEach 184 | // Lookup child Deployment that should be now present 185 | By("Creating child Deployment") 186 | createdDeployment := &appsv1.Deployment{} 187 | Eventually(func() bool { 188 | if err := k8sClient.Get(ctx, deploymentLookupKey, createdDeployment); err != nil { 189 | return false 190 | } 191 | return true 192 | }, timeout, 
interval).Should(BeTrue()) 193 | 194 | // Verify that container in Deployment has values from Server spec (image, ports, envFrom, resources) 195 | By("Populating child Deployment's container with values from Server spec (image, ports, envFrom, resources)") 196 | createdContainer := createdDeployment.Spec.Template.Spec.Containers[0] 197 | expectedContainer := gameSettings.Deployment.Spec.Template.Spec.Containers[0] 198 | Expect(createdContainer.Image).Should(Equal(expectedContainer.Image)) 199 | Expect(createdContainer.Ports).Should(Equal(expectedContainer.Ports)) 200 | Expect(createdContainer.EnvFrom).Should(Equal([]corev1.EnvFromSource{ 201 | {ConfigMapRef: &corev1.ConfigMapEnvSource{ 202 | LocalObjectReference: corev1.LocalObjectReference{ 203 | Name: ConfigMapName, 204 | }, 205 | }}, {SecretRef: &corev1.SecretEnvSource{ 206 | LocalObjectReference: corev1.LocalObjectReference{ 207 | Name: SecretName, 208 | }, 209 | }}, 210 | })) 211 | Expect(&createdContainer.Resources).Should(Equal(resources)) 212 | 213 | // Lookup child Service that should be now present 214 | By("Creating child Service") 215 | createdService := &corev1.Service{} 216 | Eventually(func() bool { 217 | if err := k8sClient.Get(ctx, serviceLookupKey, createdService); err != nil { 218 | return false 219 | } 220 | return true 221 | }, timeout, interval).Should(BeTrue()) 222 | 223 | // Verify that Service has values from Server spec (selector, ports) 224 | By("Populating child Service's values from Server spec (selector, ports)") 225 | Expect(createdService.Spec.Selector["server"]).Should(Equal(ServerName)) 226 | Expect(createdService.Spec.Ports).Should(Equal(server.Spec.Ports)) 227 | 228 | // Lookup child PVC that should be now present 229 | By("Creating child PVC") 230 | createdPvc := &corev1.PersistentVolumeClaim{} 231 | Eventually(func() bool { 232 | if err := k8sClient.Get(ctx, pvcLookupKey, createdPvc); err != nil { 233 | return false 234 | } 235 | return true 236 | }, timeout, 
interval).Should(BeTrue()) 237 | // Verify that PVC has values from Server spec (size, name) 238 | By("Populating child PVC's values from Server spec (size, name)") 239 | Expect(createdPvc.Spec.Resources.Requests.Storage().String()).Should(Equal(storage.Size)) 240 | Expect(createdPvc.Name).Should(Equal(PvcName)) 241 | 242 | // Check whether ClaimName in Deployment's volume was correctly assigned 243 | By("Setting setting child Deployment's volume ClaimName") 244 | Expect(createdDeployment.Spec.Template.Spec.Volumes[0].PersistentVolumeClaim.ClaimName).Should(Equal(PvcName)) 245 | }) 246 | }) 247 | }) 248 | 249 | Describe("updating Server spec", func() { 250 | Context("When Server config (using env vars) is successfully updated", func() { 251 | It("Should modify Deployment with new attributes", func() { 252 | ctx := context.Background() 253 | 254 | // Create configMap that will replace Server's existing configMap during update 255 | By("Replacing existing configMap with new one") 256 | newConfigMapName := "csgo-env-config-new" 257 | newConfigMap := &corev1.ConfigMap{ 258 | ObjectMeta: metav1.ObjectMeta{ 259 | Name: newConfigMapName, 260 | Namespace: ServerNamespace, 261 | }, 262 | Data: map[string]string{"SERVER_HOSTNAME": "new-hostname"}, 263 | } 264 | Expect(k8sClient.Create(ctx, newConfigMap)).Should(Succeed()) 265 | 266 | newConfigMapLookupKey := types.NamespacedName{Name: newConfigMapName, Namespace: ServerNamespace} 267 | createdNewConfigMap := &corev1.ConfigMap{} 268 | 269 | Eventually(func() bool { 270 | if err := k8sClient.Get(ctx, newConfigMapLookupKey, createdNewConfigMap); err != nil { 271 | return false 272 | } 273 | return true 274 | }, timeout, interval).Should(BeTrue()) 275 | 276 | // Lookup child Deployment that should be now present 277 | createdDeployment := &appsv1.Deployment{} 278 | Eventually(func() bool { 279 | if err := k8sClient.Get(ctx, deploymentLookupKey, createdDeployment); err != nil { 280 | return false 281 | } 282 | return true 283 | 
}, timeout, interval).Should(BeTrue()) 284 | 285 | // Lookup child Deployment's generation before Server update 286 | deploymentGeneration := createdDeployment.Generation 287 | 288 | // Update Server with new ConfigMap 289 | createdServer.Spec.Config = gameserverv1alpha1.Config{ 290 | From: []corev1.EnvFromSource{ 291 | {ConfigMapRef: &corev1.ConfigMapEnvSource{ 292 | LocalObjectReference: corev1.LocalObjectReference{ 293 | Name: newConfigMapName, 294 | }, 295 | }}}} 296 | Expect(k8sClient.Update(ctx, createdServer)).Should(Succeed()) 297 | 298 | // Lookup updated Server and verify that generation increased 299 | updatedServer := &gameserverv1alpha1.Server{} 300 | Eventually(func() bool { 301 | if err := k8sClient.Get(ctx, serverLookupKey, updatedServer); err != nil { 302 | return false 303 | } 304 | if updatedServer.Generation < 2 { 305 | return false 306 | } 307 | return true 308 | }, timeout, interval).Should(BeTrue()) 309 | Expect(updatedServer.Generation).Should(Equal(int64(2))) 310 | Expect(createdServer.Spec.Config).Should(Equal(updatedServer.Spec.Config)) 311 | 312 | // Lookup child Deployment and verify that generation increased 313 | updatedDeployment := &appsv1.Deployment{} 314 | Eventually(func() bool { 315 | if err := k8sClient.Get(ctx, deploymentLookupKey, updatedDeployment); err != nil { 316 | return false 317 | } 318 | if updatedDeployment.Generation <= deploymentGeneration { 319 | return false 320 | } 321 | return true 322 | }, timeout, interval).Should(BeTrue()) 323 | 324 | // Verify that child Deployment's container uses new configMap 325 | newCreatedContainer := updatedDeployment.Spec.Template.Spec.Containers[0] 326 | Expect(newCreatedContainer.EnvFrom).Should(Equal([]corev1.EnvFromSource{ 327 | {ConfigMapRef: &corev1.ConfigMapEnvSource{ 328 | LocalObjectReference: corev1.LocalObjectReference{ 329 | Name: newConfigMapName, 330 | }, 331 | }}, 332 | })) 333 | }) 334 | }) 335 | Context("When Server ResourceRequirements are successfully updated", 
func() { 336 | It("Should modify deployment with new attributes", func() { 337 | 338 | ctx := context.Background() 339 | 340 | // Prepare new resources that will be used in Server update 341 | By("replacing container resource limits and requests with new ones") 342 | newResources := &corev1.ResourceRequirements{ 343 | Requests: corev1.ResourceList{ 344 | corev1.ResourceCPU: resource.MustParse("500m"), 345 | corev1.ResourceMemory: resource.MustParse("128Mi"), 346 | }, 347 | Limits: corev1.ResourceList{ 348 | corev1.ResourceCPU: resource.MustParse("1"), 349 | corev1.ResourceMemory: resource.MustParse("2Gi"), 350 | }, 351 | } 352 | 353 | // Lookup child Deployment that should be now present 354 | createdDeployment := &appsv1.Deployment{} 355 | Eventually(func() bool { 356 | if err := k8sClient.Get(ctx, deploymentLookupKey, createdDeployment); err != nil { 357 | return false 358 | } 359 | return true 360 | }, timeout, interval).Should(BeTrue()) 361 | 362 | // Lookup child Deployment's and Server's generation before Server update 363 | serverGeneration := createdServer.Generation 364 | deploymentGeneration := createdDeployment.Generation 365 | 366 | // Update Server's resources 367 | createdServer.Spec.ResourceRequirements = newResources 368 | Expect(k8sClient.Update(ctx, createdServer)).Should(Succeed()) 369 | 370 | // Lookup updated Server and verify that generation increased 371 | updatedServer := &gameserverv1alpha1.Server{} 372 | Eventually(func() bool { 373 | if err := k8sClient.Get(ctx, serverLookupKey, updatedServer); err != nil { 374 | return false 375 | } 376 | if updatedServer.Generation <= serverGeneration { 377 | return false 378 | } 379 | return true 380 | }, timeout, interval).Should(BeTrue()) 381 | Expect(updatedServer.Generation).Should(Equal(int64(2))) 382 | Expect(createdServer.Spec.Config).Should(Equal(updatedServer.Spec.Config)) 383 | 384 | // Lookup child Deployment and verify that generation increased 385 | updatedDeployment := 
&appsv1.Deployment{} 386 | Eventually(func() bool { 387 | if err := k8sClient.Get(ctx, deploymentLookupKey, updatedDeployment); err != nil { 388 | return false 389 | } 390 | if updatedDeployment.Generation <= deploymentGeneration { 391 | return false 392 | } 393 | return true 394 | }, timeout, interval).Should(BeTrue()) 395 | 396 | // Verify that child Deployment's container uses new resource limits/requests 397 | newCreatedContainer := updatedDeployment.Spec.Template.Spec.Containers[0] 398 | Expect(&newCreatedContainer.Resources).Should(Equal(newResources)) 399 | }) 400 | }) 401 | Context("When Server NodePort is successfully updated", func() { 402 | It("Should modify Service with new attribute", func() { 403 | ctx := context.Background() 404 | 405 | // Prepare new NodePort value that will be used in Server update 406 | By("replacing it's NodePort with new one") 407 | newNodePort := int32(30030) 408 | 409 | // Lookup child Service that should be now present and save its generation before Server update 410 | createdService := &corev1.Service{} 411 | Eventually(func() bool { 412 | if err := k8sClient.Get(ctx, serviceLookupKey, createdService); err != nil { 413 | return false 414 | } 415 | return true 416 | }, timeout, interval).Should(BeTrue()) 417 | createdServerGeneration := createdServer.Generation 418 | 419 | // Change and update NodePort of Server 420 | createdServer.Spec.Ports[0].NodePort = newNodePort 421 | Expect(k8sClient.Update(ctx, createdServer)).Should(Succeed()) 422 | 423 | // Wait for Server to be updated (Generation increased) 424 | updatedServer := &gameserverv1alpha1.Server{} 425 | Eventually(func() bool { 426 | if err := k8sClient.Get(ctx, serverLookupKey, updatedServer); err != nil { 427 | return false 428 | } 429 | if updatedServer.Generation <= createdServerGeneration { 430 | return false 431 | } 432 | return true 433 | }, timeout, interval).Should(BeTrue()) 434 | 435 | // New generation created 436 | 
Expect(updatedServer.Generation).Should(Equal(int64(2))) 437 | 438 | // New Generation of Server should have updated NodePort 439 | Expect(createdServer.Spec.Ports[0].NodePort).Should(Equal(updatedServer.Spec.Ports[0].NodePort)) 440 | 441 | // Wait for child Service to be updated (Generation increased) 442 | updatedService := &corev1.Service{} 443 | Eventually(func() bool { 444 | if err := k8sClient.Get(ctx, serviceLookupKey, updatedService); err != nil { 445 | return false 446 | } 447 | if updatedService.Spec.Ports[0].NodePort != newNodePort { 448 | return false 449 | } 450 | return true 451 | }, timeout, interval).Should(BeTrue()) 452 | 453 | // New Generation of Service should have updated NodePort 454 | updatedNodePort := updatedService.Spec.Ports[0].NodePort 455 | Expect(updatedNodePort).Should(Equal(newNodePort)) 456 | }) 457 | }) 458 | }) 459 | }) 460 | -------------------------------------------------------------------------------- /controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Martin Heinz. 3 | */ 4 | 5 | package controllers 6 | 7 | import ( 8 | "path/filepath" 9 | ctrl "sigs.k8s.io/controller-runtime" 10 | "testing" 11 | 12 | . "github.com/onsi/ginkgo" 13 | . "github.com/onsi/gomega" 14 | "k8s.io/client-go/kubernetes/scheme" 15 | "k8s.io/client-go/rest" 16 | "sigs.k8s.io/controller-runtime/pkg/client" 17 | "sigs.k8s.io/controller-runtime/pkg/envtest" 18 | "sigs.k8s.io/controller-runtime/pkg/envtest/printer" 19 | logf "sigs.k8s.io/controller-runtime/pkg/log" 20 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 21 | 22 | gameserverv1alpha1 "github.com/MartinHeinz/game-server-operator/api/v1alpha1" 23 | // +kubebuilder:scaffold:imports 24 | ) 25 | 26 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 27 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
28 | 29 | var cfg *rest.Config 30 | var k8sClient client.Client 31 | var testEnv *envtest.Environment 32 | 33 | func TestAPIs(t *testing.T) { 34 | RegisterFailHandler(Fail) 35 | 36 | RunSpecsWithDefaultAndCustomReporters(t, 37 | "Controller Suite", 38 | []Reporter{printer.NewlineReporter{}}) 39 | } 40 | 41 | var _ = BeforeSuite(func(done Done) { 42 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 43 | 44 | By("bootstrapping test environment") 45 | testEnv = &envtest.Environment{ 46 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, 47 | } 48 | 49 | cfg, err := testEnv.Start() 50 | Expect(err).NotTo(HaveOccurred()) 51 | Expect(cfg).NotTo(BeNil()) 52 | 53 | err = gameserverv1alpha1.AddToScheme(scheme.Scheme) 54 | Expect(err).NotTo(HaveOccurred()) 55 | 56 | // +kubebuilder:scaffold:scheme 57 | 58 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 59 | Expect(err).NotTo(HaveOccurred()) 60 | Expect(k8sClient).NotTo(BeNil()) 61 | 62 | k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ 63 | Scheme: scheme.Scheme, 64 | }) 65 | Expect(err).ToNot(HaveOccurred()) 66 | 67 | err = (&ServerReconciler{ 68 | Client: k8sManager.GetClient(), 69 | Scheme: k8sManager.GetScheme(), 70 | Log: ctrl.Log.WithName("controllers").WithName("Server"), 71 | }).SetupWithManager(k8sManager) 72 | Expect(err).ToNot(HaveOccurred()) 73 | 74 | go func() { 75 | err = k8sManager.Start(ctrl.SetupSignalHandler()) 76 | Expect(err).ToNot(HaveOccurred()) 77 | }() 78 | 79 | k8sClient = k8sManager.GetClient() 80 | Expect(k8sClient).ToNot(BeNil()) 81 | 82 | close(done) 83 | 84 | }, 60) 85 | 86 | var _ = AfterSuite(func() { 87 | By("tearing down the test environment") 88 | err := testEnv.Stop() 89 | Expect(err).NotTo(HaveOccurred()) 90 | }) 91 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module 
github.com/MartinHeinz/game-server-operator 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/go-logr/logr v0.3.0 7 | github.com/onsi/ginkgo v1.14.1 8 | github.com/onsi/gomega v1.10.2 9 | k8s.io/api v0.19.2 10 | k8s.io/apimachinery v0.19.2 11 | k8s.io/client-go v0.19.2 12 | sigs.k8s.io/controller-runtime v0.7.0 13 | ) 14 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Martin Heinz. 3 | */ -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Martin Heinz. 3 | */ 4 | 5 | package main 6 | 7 | import ( 8 | "flag" 9 | "os" 10 | 11 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 12 | // to ensure that exec-entrypoint and run can make use of them. 13 | _ "k8s.io/client-go/plugin/pkg/client/auth" 14 | 15 | "k8s.io/apimachinery/pkg/runtime" 16 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 17 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 18 | ctrl "sigs.k8s.io/controller-runtime" 19 | "sigs.k8s.io/controller-runtime/pkg/healthz" 20 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 21 | 22 | gameserverv1alpha1 "github.com/MartinHeinz/game-server-operator/api/v1alpha1" 23 | "github.com/MartinHeinz/game-server-operator/controllers" 24 | // +kubebuilder:scaffold:imports 25 | ) 26 | 27 | var ( 28 | scheme = runtime.NewScheme() 29 | setupLog = ctrl.Log.WithName("setup") 30 | ) 31 | 32 | func init() { 33 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 34 | 35 | utilruntime.Must(gameserverv1alpha1.AddToScheme(scheme)) 36 | // +kubebuilder:scaffold:scheme 37 | } 38 | 39 | func main() { 40 | var metricsAddr string 41 | var enableLeaderElection bool 42 | var probeAddr string 43 | flag.StringVar(&metricsAddr, "metrics-bind-address", 
":8080", "The address the metric endpoint binds to.") 44 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 45 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 46 | "Enable leader election for controller manager. "+ 47 | "Enabling this will ensure there is only one active controller manager.") 48 | opts := zap.Options{ 49 | Development: true, 50 | } 51 | opts.BindFlags(flag.CommandLine) 52 | flag.Parse() 53 | 54 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 55 | 56 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 57 | Scheme: scheme, 58 | MetricsBindAddress: metricsAddr, 59 | Port: 9443, 60 | HealthProbeBindAddress: probeAddr, 61 | LeaderElection: enableLeaderElection, 62 | LeaderElectionID: "0fb7271b.martinheinz.dev", 63 | }) 64 | if err != nil { 65 | setupLog.Error(err, "unable to start manager") 66 | os.Exit(1) 67 | } 68 | 69 | if err = (&controllers.ServerReconciler{ 70 | Client: mgr.GetClient(), 71 | Log: ctrl.Log.WithName("controllers").WithName("Server"), 72 | Scheme: mgr.GetScheme(), 73 | }).SetupWithManager(mgr); err != nil { 74 | setupLog.Error(err, "unable to create controller", "controller", "Server") 75 | os.Exit(1) 76 | } 77 | if os.Getenv("ENABLE_WEBHOOKS") != "false" { 78 | if err = (&gameserverv1alpha1.Server{}).SetupWebhookWithManager(mgr); err != nil { 79 | setupLog.Error(err, "unable to create webhook", "webhook", "Server") 80 | os.Exit(1) 81 | } 82 | } 83 | // +kubebuilder:scaffold:builder 84 | 85 | if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil { 86 | setupLog.Error(err, "unable to set up health check") 87 | os.Exit(1) 88 | } 89 | if err := mgr.AddReadyzCheck("check", healthz.Ping); err != nil { 90 | setupLog.Error(err, "unable to set up ready check") 91 | os.Exit(1) 92 | } 93 | 94 | setupLog.Info("starting manager") 95 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 96 | setupLog.Error(err, "problem running 
manager") 97 | os.Exit(1) 98 | } 99 | } 100 | --------------------------------------------------------------------------------