├── .dockerignore ├── .github └── workflows │ ├── pr.yaml │ └── push.yaml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── api └── v1alpha1 │ ├── groupversion_info.go │ ├── nodescalingwatermark_types.go │ └── zz_generated.deepcopy.go ├── ci.Dockerfile ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── community-operators │ ├── ci.yaml │ ├── pr-body.txt │ └── pr-first-release-body.txt ├── crd │ ├── bases │ │ └── redhatcop.redhat.io_nodescalingwatermarks.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_nodescalingwatermarks.yaml │ │ └── webhook_in_nodescalingwatermarks.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── helmchart │ ├── .helmignore │ ├── Chart.yaml.tpl │ ├── kustomization.yaml │ ├── templates │ │ ├── _helpers.tpl │ │ ├── manager.yaml │ │ └── service-account.yaml │ └── values.yaml.tpl ├── local-development │ └── kustomization.yaml ├── manager │ ├── controller_manager_config.yaml │ ├── kustomization.yaml │ └── manager.yaml ├── manifests │ ├── bases │ │ └── proactive-node-scaling-operator.clusterserviceversion.yaml │ └── kustomization.yaml ├── operatorhub │ └── operator.yaml ├── prometheus │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── nodescalingwatermark_editor_role.yaml │ ├── nodescalingwatermark_viewer_role.yaml │ ├── role.yaml │ └── role_binding.yaml ├── samples │ ├── ai-ml-watermark.yaml │ ├── kustomization.yaml │ └── zone-watermark.yaml ├── scorecard │ ├── bases │ │ └── config.yaml │ ├── kustomization.yaml │ └── patches │ │ ├── basic.config.yaml │ │ └── olm.config.yaml └── templates │ └── 
watermarkDeploymentTemplate.yaml ├── controllers ├── nodescalingwatermark_controller.go └── suite_test.go ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt ├── main.go ├── media ├── icon.png └── proactive-node-scaling-operator.png ├── readme.md ├── renovate.json └── test ├── ai-ml-watermark.yaml └── zone-watermark.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore all files which are not go type 3 | !**/*.go 4 | !**/*.mod 5 | !**/*.sum 6 | -------------------------------------------------------------------------------- /.github/workflows/pr.yaml: -------------------------------------------------------------------------------- 1 | name: pull request 2 | on: 3 | pull_request: 4 | branches: 5 | - master 6 | - main 7 | 8 | jobs: 9 | shared-operator-workflow: 10 | name: shared-operator-workflow 11 | uses: redhat-cop/github-workflows-operators/.github/workflows/pr-operator.yml@111e0405debdca28ead7616868b14bdde2c79d57 # v1.0.6 12 | -------------------------------------------------------------------------------- /.github/workflows/push.yaml: -------------------------------------------------------------------------------- 1 | name: push 2 | on: 3 | push: 4 | branches: 5 | - main 6 | - master 7 | tags: 8 | - v* 9 | 10 | jobs: 11 | shared-operator-workflow: 12 | name: shared-operator-workflow 13 | uses: redhat-cop/github-workflows-operators/.github/workflows/release-operator.yml@111e0405debdca28ead7616868b14bdde2c79d57 # v1.0.6 14 | secrets: 15 | COMMUNITY_OPERATOR_PAT: ${{ secrets.COMMUNITY_OPERATOR_PAT }} 16 | REGISTRY_USERNAME: ${{ secrets.REGISTRY_USERNAME }} 17 | REGISTRY_PASSWORD: ${{ secrets.REGISTRY_PASSWORD }} 18 | with: 19 | PR_ACTOR: "raffaele.spazzoli@gmail.com" 20 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # Kubernetes Generated files - skip generated files, except for vendored files 18 | 19 | !vendor/**/zz_generated.* 20 | 21 | # editor and IDE paraphernalia 22 | .idea 23 | *.swp 24 | *.swo 25 | *~ 26 | 27 | bundle/ 28 | bundle.Dockerfile 29 | charts/ 30 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.16 as builder 3 | 4 | WORKDIR /workspace 5 | # Copy the Go Modules manifests 6 | COPY go.mod go.mod 7 | COPY go.sum go.sum 8 | # cache deps before building and copying source so that we don't need to re-download as much 9 | # and so that source changes don't invalidate our downloaded layer 10 | RUN go mod download 11 | 12 | # Copy the go source 13 | COPY main.go main.go 14 | COPY api/ api/ 15 | COPY controllers/ controllers/ 16 | 17 | # Build 18 | RUN CGO_ENABLED=0 GOOS=linux go build -a -o manager main.go 19 | 20 | # Use distroless as minimal base image to package the manager binary 21 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 22 | FROM registry.access.redhat.com/ubi8/ubi-minimal 23 | WORKDIR / 24 | COPY --from=builder /workspace/manager . 
25 | COPY config/templates /templates 26 | USER 65532:65532 27 | 28 | ENTRYPOINT ["/manager"] 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | CHART_REPO_URL ?= http://example.com 2 | HELM_REPO_DEST ?= /tmp/gh-pages 3 | OPERATOR_NAME ?=$(shell basename -z `pwd`) 4 | 5 | # VERSION defines the project version for the bundle. 6 | # Update this value when you upgrade the version of your project. 7 | # To re-generate a bundle for another specific version without changing the standard setup, you can: 8 | # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) 9 | # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) 10 | VERSION ?= 0.0.1 11 | 12 | # CHANNELS define the bundle channels used in the bundle. 13 | # Add a new line here if you would like to change its default config. 
(E.g CHANNELS = "candidate,fast,stable") 14 | # To re-generate a bundle for other specific channels without changing the standard setup, you can: 15 | # - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable) 16 | # - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable") 17 | ifneq ($(origin CHANNELS), undefined) 18 | BUNDLE_CHANNELS := --channels=$(CHANNELS) 19 | endif 20 | 21 | # DEFAULT_CHANNEL defines the default channel used in the bundle. 22 | # Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable") 23 | # To re-generate a bundle for any other default channel without changing the default setup, you can: 24 | # - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable) 25 | # - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable") 26 | ifneq ($(origin DEFAULT_CHANNEL), undefined) 27 | BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL) 28 | endif 29 | BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL) 30 | 31 | # IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images. 32 | # This variable is used to construct full image tags for bundle and catalog images. 33 | # 34 | # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both 35 | # example.com/memcached-operator-bundle:$VERSION and example.com/memcached-operator-catalog:$VERSION. 36 | IMAGE_TAG_BASE ?= quay.io/redhat-cop/$(OPERATOR_NAME) 37 | 38 | # BUNDLE_IMG defines the image:tag used for the bundle. 39 | # You can use it as an arg. 
(E.g make bundle-build BUNDLE_IMG=&lt;some-registry&gt;/&lt;project-name-bundle&gt;:&lt;tag&gt;) 40 | BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION) 41 | 42 | # Image URL to use all building/pushing image targets 43 | IMG ?= controller:latest 44 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) 45 | CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" 46 | 47 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 48 | ifeq (,$(shell go env GOBIN)) 49 | GOBIN=$(shell go env GOPATH)/bin 50 | else 51 | GOBIN=$(shell go env GOBIN) 52 | endif 53 | 54 | # Setting SHELL to bash allows bash commands to be executed by recipes. 55 | # This is a requirement for 'setup-envtest.sh' in the test target. 56 | # Options are set to exit when a recipe line exits non-zero or a piped command fails. 57 | SHELL = /usr/bin/env bash -o pipefail 58 | .SHELLFLAGS = -ec 59 | 60 | all: build 61 | 62 | ##@ General 63 | 64 | # The help target prints out all targets with their descriptions organized 65 | # beneath their categories. The categories are represented by '##@' and the 66 | # target descriptions by '##'. The awk command is responsible for reading the 67 | # entire set of makefiles included in this invocation, looking for lines of the 68 | # file as xyz: ## something, and then pretty-format the target and help. Then, 69 | # if there's a line with ##@ something, that gets pretty-printed as a category. 70 | # More info on the usage of ANSI control characters for terminal formatting: 71 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 72 | # More info on the awk command: 73 | # http://linuxcommand.org/lc3_adv_awk.php 74 | 75 | help: ## Display this help.
76 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 77 | 78 | ##@ Development 79 | 80 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. 81 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 82 | 83 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 84 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 85 | 86 | fmt: ## Run go fmt against code. 87 | go fmt ./... 88 | 89 | vet: ## Run go vet against code. 90 | go vet ./... 91 | 92 | ENVTEST_ASSETS_DIR=$(shell pwd)/testbin 93 | test: manifests generate fmt vet ## Run tests. 94 | mkdir -p ${ENVTEST_ASSETS_DIR} 95 | test -f ${ENVTEST_ASSETS_DIR}/setup-envtest.sh || curl -sSLo ${ENVTEST_ASSETS_DIR}/setup-envtest.sh https://raw.githubusercontent.com/kubernetes-sigs/controller-runtime/v0.8.3/hack/setup-envtest.sh 96 | source ${ENVTEST_ASSETS_DIR}/setup-envtest.sh; fetch_envtest_tools $(ENVTEST_ASSETS_DIR); setup_envtest_env $(ENVTEST_ASSETS_DIR); go test ./... -coverprofile cover.out 97 | 98 | ##@ Build 99 | 100 | build: generate fmt vet ## Build manager binary. 101 | go build -o bin/manager main.go 102 | 103 | run: manifests generate fmt vet ## Run a controller from your host. 104 | go run ./main.go 105 | 106 | docker-build: test ## Build docker image with the manager. 107 | docker build -t ${IMG} . 108 | 109 | docker-push: ## Push docker image with the manager. 110 | docker push ${IMG} 111 | 112 | ##@ Deployment 113 | 114 | install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
115 | $(KUSTOMIZE) build config/crd | kubectl apply -f - 116 | 117 | uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. 118 | $(KUSTOMIZE) build config/crd | kubectl delete -f - 119 | 120 | deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 121 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 122 | $(KUSTOMIZE) build config/default | kubectl apply -f - 123 | 124 | undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. 125 | $(KUSTOMIZE) build config/default | kubectl delete -f - 126 | 127 | 128 | CONTROLLER_GEN = $(shell pwd)/bin/controller-gen 129 | controller-gen: ## Download controller-gen locally if necessary. 130 | $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) 131 | 132 | KUSTOMIZE = $(shell pwd)/bin/kustomize 133 | kustomize: ## Download kustomize locally if necessary. 134 | $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) 135 | 136 | #go-get-tool will 'go get' any package $2 and install it to $1. 137 | PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) 138 | define go-get-tool 139 | @[ -f $(1) ] || { \ 140 | set -e ;\ 141 | TMP_DIR=$$(mktemp -d) ;\ 142 | cd $$TMP_DIR ;\ 143 | go mod init tmp ;\ 144 | echo "Downloading $(2)" ;\ 145 | GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ 146 | rm -rf $$TMP_DIR ;\ 147 | } 148 | endef 149 | 150 | .PHONY: bundle 151 | bundle: manifests kustomize ## Generate bundle manifests and metadata, then validate generated files. 152 | operator-sdk generate kustomize manifests -q 153 | cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG) 154 | $(KUSTOMIZE) build config/manifests | operator-sdk generate bundle -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS) 155 | operator-sdk bundle validate ./bundle 156 | 157 | .PHONY: bundle-build 158 | bundle-build: ## Build the bundle image. 
159 | docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) . 160 | 161 | .PHONY: bundle-push 162 | bundle-push: ## Push the bundle image. 163 | $(MAKE) docker-push IMG=$(BUNDLE_IMG) 164 | 165 | .PHONY: opm 166 | OPM = ./bin/opm 167 | opm: ## Download opm locally if necessary. 168 | ifeq (,$(wildcard $(OPM))) 169 | ifeq (,$(shell which opm 2>/dev/null)) 170 | @{ \ 171 | set -e ;\ 172 | mkdir -p $(dir $(OPM)) ;\ 173 | OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \ 174 | curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.15.1/$${OS}-$${ARCH}-opm ;\ 175 | chmod +x $(OPM) ;\ 176 | } 177 | else 178 | OPM = $(shell which opm) 179 | endif 180 | endif 181 | 182 | # A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0). 183 | # These images MUST exist in a registry and be pull-able. 184 | BUNDLE_IMGS ?= $(BUNDLE_IMG) 185 | 186 | # The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0). 187 | CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION) 188 | 189 | # Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image. 190 | ifneq ($(origin CATALOG_BASE_IMG), undefined) 191 | FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG) 192 | endif 193 | 194 | # Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'. 195 | # This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see: 196 | # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator 197 | .PHONY: catalog-build 198 | catalog-build: opm ## Build a catalog image. 
199 | $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT) 200 | 201 | # Push the catalog image. 202 | .PHONY: catalog-push 203 | catalog-push: ## Push a catalog image. 204 | $(MAKE) docker-push IMG=$(CATALOG_IMG) 205 | 206 | # Generate helm chart 207 | helmchart: kustomize 208 | mkdir -p ./charts/${OPERATOR_NAME}/templates 209 | mkdir -p ./charts/${OPERATOR_NAME}/crds 210 | cp ./config/helmchart/templates/* ./charts/${OPERATOR_NAME}/templates 211 | $(KUSTOMIZE) build ./config/helmchart | sed 's/release-namespace/{{.Release.Namespace}}/' > ./charts/${OPERATOR_NAME}/templates/rbac.yaml 212 | if [ -d "./config/crd" ]; then $(KUSTOMIZE) build ./config/crd > ./charts/${OPERATOR_NAME}/crds/crds.yaml; fi 213 | version=${VERSION} envsubst < ./config/helmchart/Chart.yaml.tpl > ./charts/${OPERATOR_NAME}/Chart.yaml 214 | version=${VERSION} image_repo=$${IMG%:*} envsubst < ./config/helmchart/values.yaml.tpl > ./charts/${OPERATOR_NAME}/values.yaml 215 | sed -i '/^apiVersion: monitoring.coreos.com/i {{ if .Values.enableMonitoring }}' ./charts/${OPERATOR_NAME}/templates/rbac.yaml 216 | echo {{ end }} >> ./charts/${OPERATOR_NAME}/templates/rbac.yaml 217 | helm lint ./charts/${OPERATOR_NAME} 218 | 219 | helmchart-repo: helmchart 220 | mkdir -p ${HELM_REPO_DEST}/${OPERATOR_NAME} 221 | helm package -d ${HELM_REPO_DEST}/${OPERATOR_NAME} ./charts/${OPERATOR_NAME} 222 | helm repo index --url ${CHART_REPO_URL} ${HELM_REPO_DEST} 223 | 224 | helmchart-repo-push: helmchart-repo 225 | git -C ${HELM_REPO_DEST} add . 
226 | git -C ${HELM_REPO_DEST} status 227 | git -C ${HELM_REPO_DEST} commit -m "Release ${VERSION}" 228 | git -C ${HELM_REPO_DEST} push origin "gh-pages" -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: redhat.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | projectName: proactive-node-scaling-operator 5 | repo: github.com/redhat-cop/proactive-node-scaling-operator 6 | resources: 7 | - api: 8 | crdVersion: v1 9 | namespaced: true 10 | controller: true 11 | domain: redhat.io 12 | group: redhatcop 13 | kind: NodeScalingWatermark 14 | version: v1alpha1 15 | path: github.com/redhat-cop/proactive-node-scaling-operator/api/v1alpha1 16 | version: "3" 17 | plugins: 18 | manifests.sdk.operatorframework.io/v2: {} 19 | scorecard.sdk.operatorframework.io/v2: {} 20 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Red Hat Community of Practice. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the redhatcop v1alpha1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=redhatcop.redhat.io 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "redhatcop.redhat.io", Version: "v1alpha1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /api/v1alpha1/nodescalingwatermark_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Red Hat Community of Practice. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | import ( 20 | "github.com/redhat-cop/operator-utils/pkg/util/apis" 21 | corev1 "k8s.io/api/core/v1" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | ) 24 | 25 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 26 | // NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. 27 | 28 | // NodeScalingWatermarkSpec defines the desired state of NodeScalingWatermark 29 | type NodeScalingWatermarkSpec struct { 30 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 31 | // Important: Run "make" to regenerate code after modifying this file 32 | 33 | // WatermarkPercentage: percentage of aggregated capacity of the selected nodes after which the cluster should start scaling 34 | // +kubebuilder:validation:Required 35 | // +kubebuilder:validation:Maximum=100 36 | // +kubebuilder:validation:Minimum=1 37 | WatermarkPercentage int `json:"watermarkPercentage"` 38 | 39 | // NodeSelector for the nodes for which the watermark will be calculated. These nodes should be controlled by an autoscaler. 40 | // +kubebuilder:validation:Optional 41 | // +kubebuilder:default:={node-role.kubernetes.io/worker:""} 42 | NodeSelector map[string]string `json:"nodeSelector"` 43 | 44 | // Tolerations is the tolerations that the pause pod should have. 45 | 46 | // +kubebuilder:validation:Optional 47 | Tolerations []corev1.Toleration `json:"tolerations,omitempty"` 48 | 49 | // PausePodSize is the size of the pause pods used to mark the watermark, smaller pods will be distributed better but consume slightly more resources. Tuning may be required to find the optimal size. 
50 | // +kubebuilder:validation:Optional 51 | // +kubebuilder:default:={memory: "200Mi",cpu: "200m"} 52 | PausePodSize corev1.ResourceList `json:"pausePodSize"` 53 | 54 | // +kubebuilder:validation:Optional 55 | // +kubebuilder:default:="k8s.gcr.io/pause" 56 | PausePodImage string `json:"pausePodImage"` 57 | 58 | // PriorityClassName is the priorityClassName assigned to the pause pods, if not set it will default to proactive-node-autoscaling-pods 59 | // +kubebuilder:validation:Optional 60 | // +kubebuilder:default:=proactive-node-autoscaling-pods 61 | PriorityClassName string `json:"priorityClassName"` 62 | } 63 | 64 | // NodeScalingWatermarkStatus defines the observed state of NodeScalingWatermark 65 | type NodeScalingWatermarkStatus struct { 66 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 67 | // Important: Run "make" to regenerate code after modifying this file 68 | 69 | // Conditions is the general status of the main reconciler 70 | // +kubebuilder:validation:Optional 71 | apis.EnforcingReconcileStatus `json:",inline,omitempty"` 72 | } 73 | 74 | func (m *NodeScalingWatermark) GetEnforcingReconcileStatus() apis.EnforcingReconcileStatus { 75 | return m.Status.EnforcingReconcileStatus 76 | } 77 | 78 | func (m *NodeScalingWatermark) SetEnforcingReconcileStatus(reconcileStatus apis.EnforcingReconcileStatus) { 79 | m.Status.EnforcingReconcileStatus = reconcileStatus 80 | } 81 | 82 | // +kubebuilder:object:root=true 83 | // +kubebuilder:subresource:status 84 | 85 | // NodeScalingWatermark is the Schema for the nodescalingwatermarks API 86 | type NodeScalingWatermark struct { 87 | metav1.TypeMeta `json:",inline"` 88 | metav1.ObjectMeta `json:"metadata,omitempty"` 89 | 90 | Spec NodeScalingWatermarkSpec `json:"spec,omitempty"` 91 | Status NodeScalingWatermarkStatus `json:"status,omitempty"` 92 | } 93 | 94 | // +kubebuilder:object:root=true 95 | 96 | // NodeScalingWatermarkList contains a list of NodeScalingWatermark 97 | type 
NodeScalingWatermarkList struct { 98 | metav1.TypeMeta `json:",inline"` 99 | metav1.ListMeta `json:"metadata,omitempty"` 100 | Items []NodeScalingWatermark `json:"items"` 101 | } 102 | 103 | func init() { 104 | SchemeBuilder.Register(&NodeScalingWatermark{}, &NodeScalingWatermarkList{}) 105 | } 106 | -------------------------------------------------------------------------------- /api/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | // +build !ignore_autogenerated 2 | 3 | /* 4 | Copyright 2021 Red Hat Community of Practice. 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | */ 18 | 19 | // Code generated by controller-gen. DO NOT EDIT. 20 | 21 | package v1alpha1 22 | 23 | import ( 24 | "k8s.io/api/core/v1" 25 | runtime "k8s.io/apimachinery/pkg/runtime" 26 | ) 27 | 28 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 29 | func (in *NodeScalingWatermark) DeepCopyInto(out *NodeScalingWatermark) { 30 | *out = *in 31 | out.TypeMeta = in.TypeMeta 32 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 33 | in.Spec.DeepCopyInto(&out.Spec) 34 | in.Status.DeepCopyInto(&out.Status) 35 | } 36 | 37 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeScalingWatermark. 
38 | func (in *NodeScalingWatermark) DeepCopy() *NodeScalingWatermark { 39 | if in == nil { 40 | return nil 41 | } 42 | out := new(NodeScalingWatermark) 43 | in.DeepCopyInto(out) 44 | return out 45 | } 46 | 47 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 48 | func (in *NodeScalingWatermark) DeepCopyObject() runtime.Object { 49 | if c := in.DeepCopy(); c != nil { 50 | return c 51 | } 52 | return nil 53 | } 54 | 55 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 56 | func (in *NodeScalingWatermarkList) DeepCopyInto(out *NodeScalingWatermarkList) { 57 | *out = *in 58 | out.TypeMeta = in.TypeMeta 59 | in.ListMeta.DeepCopyInto(&out.ListMeta) 60 | if in.Items != nil { 61 | in, out := &in.Items, &out.Items 62 | *out = make([]NodeScalingWatermark, len(*in)) 63 | for i := range *in { 64 | (*in)[i].DeepCopyInto(&(*out)[i]) 65 | } 66 | } 67 | } 68 | 69 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeScalingWatermarkList. 70 | func (in *NodeScalingWatermarkList) DeepCopy() *NodeScalingWatermarkList { 71 | if in == nil { 72 | return nil 73 | } 74 | out := new(NodeScalingWatermarkList) 75 | in.DeepCopyInto(out) 76 | return out 77 | } 78 | 79 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 80 | func (in *NodeScalingWatermarkList) DeepCopyObject() runtime.Object { 81 | if c := in.DeepCopy(); c != nil { 82 | return c 83 | } 84 | return nil 85 | } 86 | 87 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
88 | func (in *NodeScalingWatermarkSpec) DeepCopyInto(out *NodeScalingWatermarkSpec) { 89 | *out = *in 90 | if in.NodeSelector != nil { 91 | in, out := &in.NodeSelector, &out.NodeSelector 92 | *out = make(map[string]string, len(*in)) 93 | for key, val := range *in { 94 | (*out)[key] = val 95 | } 96 | } 97 | if in.Tolerations != nil { 98 | in, out := &in.Tolerations, &out.Tolerations 99 | *out = make([]v1.Toleration, len(*in)) 100 | for i := range *in { 101 | (*in)[i].DeepCopyInto(&(*out)[i]) 102 | } 103 | } 104 | if in.PausePodSize != nil { 105 | in, out := &in.PausePodSize, &out.PausePodSize 106 | *out = make(v1.ResourceList, len(*in)) 107 | for key, val := range *in { 108 | (*out)[key] = val.DeepCopy() 109 | } 110 | } 111 | } 112 | 113 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeScalingWatermarkSpec. 114 | func (in *NodeScalingWatermarkSpec) DeepCopy() *NodeScalingWatermarkSpec { 115 | if in == nil { 116 | return nil 117 | } 118 | out := new(NodeScalingWatermarkSpec) 119 | in.DeepCopyInto(out) 120 | return out 121 | } 122 | 123 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 124 | func (in *NodeScalingWatermarkStatus) DeepCopyInto(out *NodeScalingWatermarkStatus) { 125 | *out = *in 126 | in.EnforcingReconcileStatus.DeepCopyInto(&out.EnforcingReconcileStatus) 127 | } 128 | 129 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeScalingWatermarkStatus. 
130 | func (in *NodeScalingWatermarkStatus) DeepCopy() *NodeScalingWatermarkStatus { 131 | if in == nil { 132 | return nil 133 | } 134 | out := new(NodeScalingWatermarkStatus) 135 | in.DeepCopyInto(out) 136 | return out 137 | } 138 | -------------------------------------------------------------------------------- /ci.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi8/ubi-minimal 2 | WORKDIR / 3 | COPY config/templates /templates 4 | COPY bin/manager . 5 | USER 65532:65532 6 | 7 | ENTRYPOINT ["/manager"] 8 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 2 | # More document can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
4 | apiVersion: cert-manager.io/v1 5 | kind: Issuer 6 | metadata: 7 | name: selfsigned-issuer 8 | namespace: system 9 | spec: 10 | selfSigned: {} 11 | --- 12 | apiVersion: cert-manager.io/v1 13 | kind: Certificate 14 | metadata: 15 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 16 | namespace: system 17 | spec: 18 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 19 | dnsNames: 20 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 22 | issuerRef: 23 | kind: Issuer 24 | name: selfsigned-issuer 25 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 26 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/community-operators/ci.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Use `replaces-mode` or `semver-mode`. Once you switch to `semver-mode`, there is no easy way back. 
3 | updateGraph: semver-mode 4 | addReviewers: true 5 | reviewers: 6 | - raffaelespazzoli -------------------------------------------------------------------------------- /config/community-operators/pr-body.txt: -------------------------------------------------------------------------------- 1 | ### New Submissions 2 | * [ ] Does your operator have a [nested directory structure](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#create-a-bundle)? 3 | * [ ] Have you selected the Project *Community Operator Submissions* in your PR on the right-hand menu bar? 4 | * [ ] Are you familiar with our [contribution guidelines](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md)? 5 | * [ ] Have you [packaged and deployed](https://github.com/operator-framework/community-operators/blob/master/docs/testing-operators.md) your Operator for Operator Framework? 6 | * [ ] Have you tested your Operator with all Custom Resource Definitions? 7 | * [ ] Have you tested your Operator in all supported [installation modes](https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md#operator-metadata)? 8 | * [ ] Is your submission [signed](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#sign-your-work)? 9 | ### Updates to existing Operators 10 | * [x] Is your new CSV pointing to the previous version with the `replaces` property? 11 | * [x] Is your new CSV referenced in the [appropriate channel](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#bundle-format) defined in the `package.yaml` ? 12 | * [ ] Have you tested an update to your Operator when deployed via OLM? 13 | * [x] Is your submission [signed](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#sign-your-work)? 
14 | ### Your submission should not 15 | * [x] Modify more than one operator 16 | * [x] Modify an Operator you don't own 17 | * [x] Rename an operator - please remove and add with a different name instead 18 | * [x] Submit operators to both `upstream-community-operators` and `community-operators` at once 19 | * [x] Modify any files outside the above mentioned folders 20 | * [x] Contain more than one commit. **Please squash your commits.** 21 | ### Operator Description must contain (in order) 22 | 1. [x] Description about the managed Application and where to find more information 23 | 2. [x] Features and capabilities of your Operator and how to use it 24 | 3. [x] Any manual steps about potential pre-requisites for using your Operator 25 | ### Operator Metadata should contain 26 | * [x] Human readable name and 1-liner description about your Operator 27 | * [x] Valid [category name](https://github.com/operator-framework/community-operators/blob/master/docs/required-fields.md#categories)1 28 | * [x] One of the pre-defined [capability levels](https://github.com/operator-framework/operator-courier/blob/4d1a25d2c8d52f7de6297ec18d8afd6521236aa2/operatorcourier/validate.py#L556)2 29 | * [x] Links to the maintainer, source code and documentation 30 | * [x] Example templates for all Custom Resource Definitions intended to be used 31 | * [x] A quadratic logo 32 | Remember that you can preview your CSV [here](https://operatorhub.io/preview). 
33 | -- 34 | 1 If you feel your Operator does not fit any of the pre-defined categories, file a PR against this repo and explain your need 35 | 2 For more information see [here](https://github.com/operator-framework/operator-sdk/blob/master/doc/images/operator-capability-level.svg) 36 | -------------------------------------------------------------------------------- /config/community-operators/pr-first-release-body.txt: -------------------------------------------------------------------------------- 1 | ### New Submissions 2 | * [x] Does your operator have a [nested directory structure](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#create-a-bundle)? 3 | * [x] Have you selected the Project *Community Operator Submissions* in your PR on the right-hand menu bar? 4 | * [x] Are you familiar with our [contribution guidelines](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md)? 5 | * [x] Have you [packaged and deployed](https://github.com/operator-framework/community-operators/blob/master/docs/testing-operators.md) your Operator for Operator Framework? 6 | * [x] Have you tested your Operator with all Custom Resource Definitions? 7 | * [x] Have you tested your Operator in all supported [installation modes](https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md#operator-metadata)? 8 | * [x] Is your submission [signed](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#sign-your-work)? 9 | ### Updates to existing Operators 10 | * [ ] Is your new CSV pointing to the previous version with the `replaces` property? 11 | * [ ] Is your new CSV referenced in the [appropriate channel](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#bundle-format) defined in the `package.yaml` ? 12 | * [ ] Have you tested an update to your Operator when deployed via OLM? 
13 | * [ ] Is your submission [signed](https://github.com/operator-framework/community-operators/blob/master/docs/contributing.md#sign-your-work)? 14 | ### Your submission should not 15 | * [x] Modify more than one operator 16 | * [x] Modify an Operator you don't own 17 | * [x] Rename an operator - please remove and add with a different name instead 18 | * [x] Submit operators to both `upstream-community-operators` and `community-operators` at once 19 | * [x] Modify any files outside the above mentioned folders 20 | * [x] Contain more than one commit. **Please squash your commits.** 21 | ### Operator Description must contain (in order) 22 | 1. [x] Description about the managed Application and where to find more information 23 | 2. [x] Features and capabilities of your Operator and how to use it 24 | 3. [x] Any manual steps about potential pre-requisites for using your Operator 25 | ### Operator Metadata should contain 26 | * [x] Human readable name and 1-liner description about your Operator 27 | * [x] Valid [category name](https://github.com/operator-framework/community-operators/blob/master/docs/required-fields.md#categories)1 28 | * [x] One of the pre-defined [capability levels](https://github.com/operator-framework/operator-courier/blob/4d1a25d2c8d52f7de6297ec18d8afd6521236aa2/operatorcourier/validate.py#L556)2 29 | * [x] Links to the maintainer, source code and documentation 30 | * [x] Example templates for all Custom Resource Definitions intended to be used 31 | * [x] A quadratic logo 32 | Remember that you can preview your CSV [here](https://operatorhub.io/preview). 
33 | -- 34 | 1 If you feel your Operator does not fit any of the pre-defined categories, file a PR against this repo and explain your need 35 | 2 For more information see [here](https://github.com/operator-framework/operator-sdk/blob/master/doc/images/operator-capability-level.svg) 36 | -------------------------------------------------------------------------------- /config/crd/bases/redhatcop.redhat.io_nodescalingwatermarks.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.4.1 8 | creationTimestamp: null 9 | name: nodescalingwatermarks.redhatcop.redhat.io 10 | spec: 11 | group: redhatcop.redhat.io 12 | names: 13 | kind: NodeScalingWatermark 14 | listKind: NodeScalingWatermarkList 15 | plural: nodescalingwatermarks 16 | singular: nodescalingwatermark 17 | scope: Namespaced 18 | versions: 19 | - name: v1alpha1 20 | schema: 21 | openAPIV3Schema: 22 | description: NodeScalingWatermark is the Schema for the nodescalingwatermarks 23 | API 24 | properties: 25 | apiVersion: 26 | description: 'APIVersion defines the versioned schema of this representation 27 | of an object. Servers should convert recognized schemas to the latest 28 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 29 | type: string 30 | kind: 31 | description: 'Kind is a string value representing the REST resource this 32 | object represents. Servers may infer this from the endpoint the client 33 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 34 | type: string 35 | metadata: 36 | type: object 37 | spec: 38 | description: NodeScalingWatermarkSpec defines the desired state of NodeScalingWatermark 39 | properties: 40 | nodeSelector: 41 | additionalProperties: 42 | type: string 43 | default: 44 | node-role.kubernetes.io/worker: "" 45 | description: NodeSelector for the nodes for which the watermark will 46 | be calculated. These nodes should be controlled by an autoscaler. 47 | type: object 48 | pausePodImage: 49 | default: k8s.gcr.io/pause 50 | type: string 51 | pausePodSize: 52 | additionalProperties: 53 | anyOf: 54 | - type: integer 55 | - type: string 56 | pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ 57 | x-kubernetes-int-or-string: true 58 | default: 59 | cpu: 200m 60 | memory: 200Mi 61 | description: PausePodSize is size of the pause pods used to mark the 62 | watermark, smaller pods will distributed better but consume slightly 63 | more resources. Tuning may be required to find the optimal size. 64 | type: object 65 | priorityClassName: 66 | default: proactive-node-autoscaling-pods 67 | description: PriorityClassName is the priorityClassName assigned to 68 | the pause pods, if not set it will be default to proactive-node-autoscaling-pods 69 | type: string 70 | tolerations: 71 | items: 72 | description: The pod this Toleration is attached to tolerates any 73 | taint that matches the triple using the matching 74 | operator . 75 | properties: 76 | effect: 77 | description: Effect indicates the taint effect to match. Empty 78 | means match all taint effects. When specified, allowed values 79 | are NoSchedule, PreferNoSchedule and NoExecute. 80 | type: string 81 | key: 82 | description: Key is the taint key that the toleration applies 83 | to. Empty means match all taint keys. 
If the key is empty, 84 | operator must be Exists; this combination means to match all 85 | values and all keys. 86 | type: string 87 | operator: 88 | description: Operator represents a key's relationship to the 89 | value. Valid operators are Exists and Equal. Defaults to Equal. 90 | Exists is equivalent to wildcard for value, so that a pod 91 | can tolerate all taints of a particular category. 92 | type: string 93 | tolerationSeconds: 94 | description: TolerationSeconds represents the period of time 95 | the toleration (which must be of effect NoExecute, otherwise 96 | this field is ignored) tolerates the taint. By default, it 97 | is not set, which means tolerate the taint forever (do not 98 | evict). Zero and negative values will be treated as 0 (evict 99 | immediately) by the system. 100 | format: int64 101 | type: integer 102 | value: 103 | description: Value is the taint value the toleration matches 104 | to. If the operator is Exists, the value should be empty, 105 | otherwise just a regular string. 106 | type: string 107 | type: object 108 | type: array 109 | watermarkPercentage: 110 | description: 'WatermarkPercentage: percentage of aggregated capacity 111 | of the selectd nodes after which the cluster should start scaling' 112 | maximum: 100 113 | minimum: 1 114 | type: integer 115 | required: 116 | - watermarkPercentage 117 | type: object 118 | status: 119 | description: NodeScalingWatermarkStatus defines the observed state of 120 | NodeScalingWatermark 121 | properties: 122 | conditions: 123 | description: ReconcileStatus this is the general status of the main 124 | reconciler 125 | items: 126 | description: "Condition contains details for one aspect of the current 127 | state of this API Resource. --- This struct is intended for direct 128 | use as an array at the field path .status.conditions. For example, 129 | type FooStatus struct{ // Represents the observations of a 130 | foo's current state. 
// Known .status.conditions.type are: 131 | \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type 132 | \ // +patchStrategy=merge // +listType=map // +listMapKey=type 133 | \ Conditions []metav1.Condition `json:\"conditions,omitempty\" 134 | patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` 135 | \n // other fields }" 136 | properties: 137 | lastTransitionTime: 138 | description: lastTransitionTime is the last time the condition 139 | transitioned from one status to another. This should be when 140 | the underlying condition changed. If that is not known, then 141 | using the time when the API field changed is acceptable. 142 | format: date-time 143 | type: string 144 | message: 145 | description: message is a human readable message indicating 146 | details about the transition. This may be an empty string. 147 | maxLength: 32768 148 | type: string 149 | observedGeneration: 150 | description: observedGeneration represents the .metadata.generation 151 | that the condition was set based upon. For instance, if .metadata.generation 152 | is currently 12, but the .status.conditions[x].observedGeneration 153 | is 9, the condition is out of date with respect to the current 154 | state of the instance. 155 | format: int64 156 | minimum: 0 157 | type: integer 158 | reason: 159 | description: reason contains a programmatic identifier indicating 160 | the reason for the condition's last transition. Producers 161 | of specific condition types may define expected values and 162 | meanings for this field, and whether the values are considered 163 | a guaranteed API. The value should be a CamelCase string. 164 | This field may not be empty. 165 | maxLength: 1024 166 | minLength: 1 167 | pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ 168 | type: string 169 | status: 170 | description: status of the condition, one of True, False, Unknown. 
171 | enum: 172 | - "True" 173 | - "False" 174 | - Unknown 175 | type: string 176 | type: 177 | description: type of condition in CamelCase or in foo.example.com/CamelCase. 178 | --- Many .condition.type values are consistent across resources 179 | like Available, but because arbitrary conditions can be useful 180 | (see .node.status.conditions), the ability to deconflict is 181 | important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) 182 | maxLength: 316 183 | pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ 184 | type: string 185 | required: 186 | - lastTransitionTime 187 | - message 188 | - reason 189 | - status 190 | - type 191 | type: object 192 | type: array 193 | x-kubernetes-list-map-keys: 194 | - type 195 | x-kubernetes-list-type: map 196 | lockedPatchStatuses: 197 | additionalProperties: 198 | items: 199 | description: "Condition contains details for one aspect of the 200 | current state of this API Resource. --- This struct is intended 201 | for direct use as an array at the field path .status.conditions. 202 | \ For example, type FooStatus struct{ // Represents the 203 | observations of a foo's current state. // Known .status.conditions.type 204 | are: \"Available\", \"Progressing\", and \"Degraded\" // 205 | +patchMergeKey=type // +patchStrategy=merge // +listType=map 206 | \ // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" 207 | patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` 208 | \n // other fields }" 209 | properties: 210 | lastTransitionTime: 211 | description: lastTransitionTime is the last time the condition 212 | transitioned from one status to another. This should be 213 | when the underlying condition changed. If that is not known, 214 | then using the time when the API field changed is acceptable. 
215 | format: date-time 216 | type: string 217 | message: 218 | description: message is a human readable message indicating 219 | details about the transition. This may be an empty string. 220 | maxLength: 32768 221 | type: string 222 | observedGeneration: 223 | description: observedGeneration represents the .metadata.generation 224 | that the condition was set based upon. For instance, if 225 | .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration 226 | is 9, the condition is out of date with respect to the current 227 | state of the instance. 228 | format: int64 229 | minimum: 0 230 | type: integer 231 | reason: 232 | description: reason contains a programmatic identifier indicating 233 | the reason for the condition's last transition. Producers 234 | of specific condition types may define expected values and 235 | meanings for this field, and whether the values are considered 236 | a guaranteed API. The value should be a CamelCase string. 237 | This field may not be empty. 238 | maxLength: 1024 239 | minLength: 1 240 | pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ 241 | type: string 242 | status: 243 | description: status of the condition, one of True, False, 244 | Unknown. 245 | enum: 246 | - "True" 247 | - "False" 248 | - Unknown 249 | type: string 250 | type: 251 | description: type of condition in CamelCase or in foo.example.com/CamelCase. 252 | --- Many .condition.type values are consistent across resources 253 | like Available, but because arbitrary conditions can be 254 | useful (see .node.status.conditions), the ability to deconflict 255 | is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) 256 | maxLength: 316 257 | pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ 258 | type: string 259 | required: 260 | - lastTransitionTime 261 | - message 262 | - reason 263 | - status 264 | - type 265 | type: object 266 | type: array 267 | description: LockedResourceStatuses contains the reconcile status 268 | for each of the managed resources 269 | type: object 270 | lockedResourceStatuses: 271 | additionalProperties: 272 | items: 273 | description: "Condition contains details for one aspect of the 274 | current state of this API Resource. --- This struct is intended 275 | for direct use as an array at the field path .status.conditions. 276 | \ For example, type FooStatus struct{ // Represents the 277 | observations of a foo's current state. // Known .status.conditions.type 278 | are: \"Available\", \"Progressing\", and \"Degraded\" // 279 | +patchMergeKey=type // +patchStrategy=merge // +listType=map 280 | \ // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" 281 | patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` 282 | \n // other fields }" 283 | properties: 284 | lastTransitionTime: 285 | description: lastTransitionTime is the last time the condition 286 | transitioned from one status to another. This should be 287 | when the underlying condition changed. If that is not known, 288 | then using the time when the API field changed is acceptable. 289 | format: date-time 290 | type: string 291 | message: 292 | description: message is a human readable message indicating 293 | details about the transition. This may be an empty string. 294 | maxLength: 32768 295 | type: string 296 | observedGeneration: 297 | description: observedGeneration represents the .metadata.generation 298 | that the condition was set based upon. 
For instance, if 299 | .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration 300 | is 9, the condition is out of date with respect to the current 301 | state of the instance. 302 | format: int64 303 | minimum: 0 304 | type: integer 305 | reason: 306 | description: reason contains a programmatic identifier indicating 307 | the reason for the condition's last transition. Producers 308 | of specific condition types may define expected values and 309 | meanings for this field, and whether the values are considered 310 | a guaranteed API. The value should be a CamelCase string. 311 | This field may not be empty. 312 | maxLength: 1024 313 | minLength: 1 314 | pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ 315 | type: string 316 | status: 317 | description: status of the condition, one of True, False, 318 | Unknown. 319 | enum: 320 | - "True" 321 | - "False" 322 | - Unknown 323 | type: string 324 | type: 325 | description: type of condition in CamelCase or in foo.example.com/CamelCase. 326 | --- Many .condition.type values are consistent across resources 327 | like Available, but because arbitrary conditions can be 328 | useful (see .node.status.conditions), the ability to deconflict 329 | is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) 330 | maxLength: 316 331 | pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ 332 | type: string 333 | required: 334 | - lastTransitionTime 335 | - message 336 | - reason 337 | - status 338 | - type 339 | type: object 340 | type: array 341 | description: LockedResourceStatuses contains the reconcile status 342 | for each of the managed resources 343 | type: object 344 | type: object 345 | type: object 346 | served: true 347 | storage: true 348 | subresources: 349 | status: {} 350 | status: 351 | acceptedNames: 352 | kind: "" 353 | plural: "" 354 | conditions: [] 355 | storedVersions: [] 356 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/redhatcop.redhat.io_nodescalingwatermarks.yaml 6 | # +kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patchesStrategicMerge: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- patches/webhook_in_nodescalingwatermarks.yaml 12 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 13 | 14 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 15 | # patches here are for enabling the CA injection for each CRD 16 | #- patches/cainjection_in_nodescalingwatermarks.yaml 17 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 18 | 19 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
20 | configurations: 21 | - kustomizeconfig.yaml 22 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_nodescalingwatermarks.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: nodescalingwatermarks.redhatcop.redhat.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_nodescalingwatermarks.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: nodescalingwatermarks.redhatcop.redhat.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | 
-------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: proactive-node-scaling-operator 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: proactive-node-scaling-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | - ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 31 | - manager_auth_proxy_patch.yaml 32 | 33 | # Mount the controller config file for loading manager configurations 34 | # through a ComponentConfig type 35 | #- manager_config_patch.yaml 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # the following config is for teaching kustomize how to do var substitution 47 | vars: 48 | - name: METRICS_SERVICE_NAME 49 | objref: 50 | kind: Service 51 | version: v1 52 | name: controller-manager-metrics 53 | - name: METRICS_SERVICE_NAMESPACE 54 | objref: 55 | kind: Service 56 | version: v1 57 | name: controller-manager-metrics 58 | fieldref: 59 | fieldpath: metadata.namespace 60 | 61 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 62 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 63 | # objref: 64 | # kind: Certificate 65 | # group: cert-manager.io 66 | # version: v1 67 | # name: serving-cert # this name should match the one in certificate.yaml 68 | # fieldref: 69 | # fieldpath: metadata.namespace 70 | #- name: CERTIFICATE_NAME 71 | # objref: 72 | # kind: Certificate 73 | # group: cert-manager.io 74 | # version: v1 75 | # name: serving-cert # this name should match the one in certificate.yaml 76 | #- name: SERVICE_NAMESPACE # namespace of the service 77 | # objref: 78 | # kind: Service 79 | # version: v1 80 | # name: webhook-service 81 | # fieldref: 82 | # fieldpath: metadata.namespace 83 | #- name: SERVICE_NAME 84 | # objref: 85 | # kind: Service 86 | # version: v1 87 | # name: webhook-service 88 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: quay.io/redhat-cop/kube-rbac-proxy:v0.11.0 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=10" 19 | - "--tls-cert-file=/etc/certs/tls/tls.crt" 20 | - "--tls-private-key-file=/etc/certs/tls/tls.key" 21 | volumeMounts: 22 | - mountPath: /etc/certs/tls 23 | name: tls-cert 24 | ports: 25 | - containerPort: 8443 26 | name: https 27 | - name: manager 28 | args: 29 | - "--health-probe-bind-address=:8081" 30 | - "--metrics-bind-address=127.0.0.1:8080" 31 | - "--leader-elect" 32 | volumes: 33 | - name: tls-cert 34 | secret: 35 | defaultMode: 420 36 | secretName: proactive-node-scaling-operator-certs 37 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /config/helmchart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /config/helmchart/Chart.yaml.tpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: proactive-node-scaling-operator 3 | version: ${version} 4 | appVersion: ${version} 5 | description: Helm chart that deploys proactive-node-scaling-operator 6 | keywords: 7 | - volume 8 | - storage 9 | - csi 10 | - expansion 11 | - monitoring 12 | sources: 13 | - https://github.com/redhat-cop/proactive-node-scaling-operator 14 | engine: gotpl -------------------------------------------------------------------------------- /config/helmchart/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: release-namespace 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: proactive-node-scaling-operator- 10 | 11 | # Labels to add to all resources and selectors. 
12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../rbac 17 | - ../prometheus 18 | 19 | vars: 20 | - name: METRICS_SERVICE_NAME 21 | objref: 22 | kind: Service 23 | version: v1 24 | name: controller-manager-metrics 25 | - name: METRICS_SERVICE_NAMESPACE 26 | objref: 27 | kind: Service 28 | version: v1 29 | name: controller-manager-metrics 30 | fieldref: 31 | fieldpath: metadata.namespace 32 | -------------------------------------------------------------------------------- /config/helmchart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "proactive-node-scaling-operator.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 7 | {{- end }} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "proactive-node-scaling-operator.fullname" -}} 15 | {{- if .Values.fullnameOverride }} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 17 | {{- else }} 18 | {{- $name := default .Chart.Name .Values.nameOverride }} 19 | {{- if contains $name .Release.Name }} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 21 | {{- else }} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "proactive-node-scaling-operator.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 32 | {{- end }} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "proactive-node-scaling-operator.labels" -}} 38 | helm.sh/chart: {{ include "proactive-node-scaling-operator.chart" . }} 39 | {{ include "proactive-node-scaling-operator.selectorLabels" . }} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | {{- end }} 45 | 46 | {{/* 47 | Selector labels 48 | */}} 49 | {{- define "proactive-node-scaling-operator.selectorLabels" -}} 50 | app.kubernetes.io/name: {{ include "proactive-node-scaling-operator.name" . }} 51 | app.kubernetes.io/instance: {{ .Release.Name }} 52 | {{- end }} 53 | 54 | -------------------------------------------------------------------------------- /config/helmchart/templates/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "proactive-node-scaling-operator.fullname" . }} 5 | labels: 6 | {{- include "proactive-node-scaling-operator.labels" . | nindent 4 }} 7 | spec: 8 | selector: 9 | matchLabels: 10 | {{- include "proactive-node-scaling-operator.selectorLabels" . | nindent 6 }} 11 | replicas: {{ .Values.replicaCount }} 12 | template: 13 | metadata: 14 | {{- with .Values.podAnnotations }} 15 | annotations: 16 | {{- toYaml . | nindent 8 }} 17 | {{- end }} 18 | labels: 19 | {{- include "proactive-node-scaling-operator.selectorLabels" . | nindent 8 }} 20 | operator: proactive-node-scaling-operator 21 | spec: 22 | serviceAccountName: proactive-node-scaling-operator-controller-manager 23 | {{- with .Values.imagePullSecrets }} 24 | imagePullSecrets: 25 | {{- toYaml . 
| nindent 8 }} 26 | {{- end }} 27 | containers: 28 | - args: 29 | - --secure-listen-address=0.0.0.0:8443 30 | - --upstream=http://127.0.0.1:8080/ 31 | - --logtostderr=true 32 | - --tls-cert-file=/etc/certs/tls/tls.crt 33 | - --tls-private-key-file=/etc/certs/tls/tls.key 34 | - --v=10 35 | image: "{{ .Values.kube_rbac_proxy.image.repository }}:{{ .Values.kube_rbac_proxy.image.tag | default "v0.5.0" }}" 36 | name: kube-rbac-proxy 37 | ports: 38 | - containerPort: 8443 39 | name: https 40 | volumeMounts: 41 | - mountPath: /etc/certs/tls 42 | name: tls-cert 43 | imagePullPolicy: {{ .Values.kube_rbac_proxy.image.pullPolicy }} 44 | {{- with .Values.env }} 45 | env: 46 | {{- toYaml . | nindent 8 }} 47 | {{- end }} 48 | resources: 49 | {{- toYaml .Values.kube_rbac_proxy.resources | nindent 10 }} 50 | - command: 51 | - /manager 52 | args: 53 | - --leader-elect 54 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 55 | imagePullPolicy: {{ .Values.image.pullPolicy }} 56 | name: {{ .Chart.Name }} 57 | resources: 58 | {{- toYaml .Values.resources | nindent 10 }} 59 | livenessProbe: 60 | httpGet: 61 | path: /healthz 62 | port: 8081 63 | initialDelaySeconds: 15 64 | periodSeconds: 20 65 | readinessProbe: 66 | httpGet: 67 | path: /readyz 68 | port: 8081 69 | initialDelaySeconds: 5 70 | periodSeconds: 10 71 | {{- with .Values.nodeSelector }} 72 | nodeSelector: 73 | {{- toYaml . | nindent 8 }} 74 | {{- end }} 75 | {{- with .Values.affinity }} 76 | affinity: 77 | {{- toYaml . | nindent 8 }} 78 | {{- end }} 79 | {{- with .Values.tolerations }} 80 | tolerations: 81 | {{- toYaml . 
| nindent 8 }} 82 | {{- end }} 83 | volumes: 84 | - name: tls-cert 85 | secret: 86 | defaultMode: 420 87 | secretName: proactive-node-scaling-operator-certs -------------------------------------------------------------------------------- /config/helmchart/templates/service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: proactive-node-scaling-operator-controller-manager -------------------------------------------------------------------------------- /config/helmchart/values.yaml.tpl: -------------------------------------------------------------------------------- 1 | # Default values for helm-try. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: ${image_repo} 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 11 | tag: ${version} 12 | 13 | imagePullSecrets: [] 14 | nameOverride: "" 15 | fullnameOverride: "" 16 | env: [] 17 | podAnnotations: {} 18 | 19 | resources: 20 | requests: 21 | cpu: 100m 22 | memory: 20Mi 23 | 24 | nodeSelector: {} 25 | 26 | tolerations: [] 27 | 28 | affinity: {} 29 | 30 | kube_rbac_proxy: 31 | image: 32 | repository: quay.io/redhat-cop/kube-rbac-proxy 33 | pullPolicy: IfNotPresent 34 | tag: v0.11.0 35 | resources: 36 | requests: 37 | cpu: 100m 38 | memory: 20Mi 39 | 40 | enableMonitoring: true 41 | -------------------------------------------------------------------------------- /config/local-development/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: proactive-node-scaling-operator-local 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 
7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: proactive-node-scaling- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../rbac -------------------------------------------------------------------------------- /config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 | healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: af18d8c9.redhat.io 12 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | 7 | configMapGenerator: 8 | - files: 9 | - controller_manager_config.yaml 10 | name: manager-config 11 | apiVersion: kustomize.config.k8s.io/v1beta1 12 | kind: Kustomization 13 | images: 14 | - name: controller 15 | newName: quay.io/raffaelespazzoli/proactive-node-scaling-operator 16 | newTag: latest 17 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | openshift.io/cluster-monitoring: "true" 7 | name: system 8 | --- 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | name: controller-manager 13 | namespace: system 14 | labels: 15 | operator: proactive-node-scaling-operator 16 | spec: 17 | selector: 18 | matchLabels: 19 | operator: 
proactive-node-scaling-operator 20 | replicas: 1 21 | template: 22 | metadata: 23 | labels: 24 | operator: proactive-node-scaling-operator 25 | spec: 26 | serviceAccountName: controller-manager 27 | containers: 28 | - command: 29 | - /manager 30 | args: 31 | - --leader-elect 32 | image: controller:latest 33 | name: manager 34 | securityContext: 35 | allowPrivilegeEscalation: false 36 | livenessProbe: 37 | httpGet: 38 | path: /healthz 39 | port: 8081 40 | initialDelaySeconds: 15 41 | periodSeconds: 20 42 | readinessProbe: 43 | httpGet: 44 | path: /readyz 45 | port: 8081 46 | initialDelaySeconds: 5 47 | periodSeconds: 10 48 | resources: 49 | requests: 50 | cpu: 100m 51 | memory: 20Mi 52 | terminationGracePeriodSeconds: 10 53 | -------------------------------------------------------------------------------- /config/manifests/bases/proactive-node-scaling-operator.clusterserviceversion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: ClusterServiceVersion 3 | metadata: 4 | labels: 5 | operatorframework.io/os.linux: supported 6 | operatorframework.io/arch.amd64: supported 7 | operatorframework.io/arch.arm64: supported 8 | operatorframework.io/arch.ppc64le: supported 9 | annotations: 10 | alm-examples: '[]' 11 | capabilities: Basic Install 12 | categories: AI/Machine Learning 13 | certified: "false" 14 | containerImage: quay.io/redhat-cop/proactive-node-scaling-operator 15 | createdAt: 2/2/2021 16 | description: This operator provides automation proactively scaling nodes before 17 | pods get stuck waiting for the cluster autoscaler. 
18 | operatorframework.io/suggested-namespace: proactive-node-scaling-operator 19 | repository: https://github.com/redhat-cop/proactive-node-scaling-operator 20 | support: Best Effort 21 | operators.openshift.io/infrastructure-features: '["Disconnected"]' 22 | name: proactive-node-scaling-operator.v0.0.0 23 | namespace: placeholder 24 | spec: 25 | apiservicedefinitions: {} 26 | customresourcedefinitions: 27 | owned: 28 | - description: NodeScalingWatermark is the Schema for the nodescalingwatermarks 29 | API 30 | displayName: Node Scaling Watermark 31 | kind: NodeScalingWatermark 32 | name: nodescalingwatermarks.redhatcop.redhat.io 33 | version: v1alpha1 34 | description: | 35 | This operator makes the [cluster autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) more proactive. As of now the cluster auto scaler will create new nodes only when a pod is pending because it cannot be allocated due to lack of capacity. This is not a good user experience as the pending workload has to wait for several minutes as the new node is created and joins the cluster. 36 | 37 | The Proactive Node Scaling Operator improves the user experience by allocating low priority pods that don't do anything. When the cluster is full and a new user pod is created the following happens: 38 | 39 | 1. some of the low priority pods are de-scheduled to make room for the user pod, which can then be scheduled. The user workload does not have to wait in this case. 40 | 41 | 2. the de-scheduled low priority pods are rescheduled and in doing so they trigger the cluster autoscaler to add new nodes. 42 | 43 | Essentially this operator allows you to trade wasted resources for faster response time. 44 | 45 | In order for this operator to work correctly [pod priorities](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) must be defined.
The default name for the priority class used by this operator is "proactive-node-autoscaling-pods" and it should have the lowest possible priority, 0. To ensure your regular workloads get a normal priority you should also define a PriorityClass for those with a higher priority than 0 and set globalDefault to true. 46 | 47 | For example: 48 | 49 | ```yaml 50 | apiVersion: scheduling.k8s.io/v1 51 | kind: PriorityClass 52 | metadata: 53 | name: proactive-node-autoscaling-pods 54 | value: 0 55 | globalDefault: false 56 | description: "This priority class is the priority class used for Proactive Node Scaling Pods." 57 | --- 58 | apiVersion: scheduling.k8s.io/v1 59 | kind: PriorityClass 60 | metadata: 61 | name: normal-workload 62 | value: 1000 63 | globalDefault: true 64 | description: "This priority class is the cluster default and should be used for normal workloads." 65 | ``` 66 | 67 | Also for this operator to work the [cluster autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) must be active, see OpenShift instructions [here](https://docs.openshift.com/container-platform/4.6/machine_management/applying-autoscaling.html) on how to turn it on. 68 | 69 | To activate the proactive autoscaling, a CR must be defined, here is an example: 70 | 71 | ```yaml 72 | apiVersion: redhatcop.redhat.io/v1alpha1 73 | kind: NodeScalingWatermark 74 | metadata: 75 | name: us-west-2a 76 | spec: 77 | priorityClassName: proactive-node-autoscaling-pods 78 | watermarkPercentage: 20 79 | nodeSelector: 80 | topology.kubernetes.io/zone: us-west-2a 81 | ``` 82 | 83 | The `nodeSelector` selects the nodes observed by this operator, which are also the nodes on which the low priority pods will be scheduled. The nodes observed by the cluster autoscaler should coincide with the nodes selected by this operator CR. 84 | 85 | The `watermarkPercentage` defines the percentage of capacity of user workload that will be allocated to low priority pods.
So in this example 20% of the user allocated capacity will be allocated via low priority pods. This also means that when the user workload reaches 80% capacity of the nodes selected by this CR (and the autoscaler), the cluster will start to scale. 86 | displayName: Proactive Node Scaling Operator 87 | icon: 88 | - base64data: /9j/4AAQSkZJRgABAQAAAQABAAD/4QAYRXhpZgAASUkqAAgAAAAAAAAAAAAAAP/hAytodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvADw/eHBhY2tldCBiZWdpbj0i77u/IiBpZD0iVzVNME1wQ2VoaUh6cmVTek5UY3prYzlkIj8+IDx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IkFkb2JlIFhNUCBDb3JlIDUuMy1jMDExIDY2LjE0NTY2MSwgMjAxMi8wMi8wNi0xNDo1NjoyNyAgICAgICAgIj4gPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4gPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIgeG1sbnM6eG1wPSJodHRwOi8vbnMuYWRvYmUuY29tL3hhcC8xLjAvIiB4bWxuczp4bXBNTT0iaHR0cDovL25zLmFkb2JlLmNvbS94YXAvMS4wL21tLyIgeG1sbnM6c3RSZWY9Imh0dHA6Ly9ucy5hZG9iZS5jb20veGFwLzEuMC9zVHlwZS9SZXNvdXJjZVJlZiMiIHhtcDpDcmVhdG9yVG9vbD0iQWRvYmUgUGhvdG9zaG9wIENTNiAoV2luZG93cykiIHhtcE1NOkluc3RhbmNlSUQ9InhtcC5paWQ6Nzg4QTJBMjVEMDI1MTFFN0EwQUVDODc5QjYyQkFCMUQiIHhtcE1NOkRvY3VtZW50SUQ9InhtcC5kaWQ6Nzg4QTJBMjZEMDI1MTFFN0EwQUVDODc5QjYyQkFCMUQiPiA8eG1wTU06RGVyaXZlZEZyb20gc3RSZWY6aW5zdGFuY2VJRD0ieG1wLmlpZDo3ODhBMkEyM0QwMjUxMUU3QTBBRUM4NzlCNjJCQUIxRCIgc3RSZWY6ZG9jdW1lbnRJRD0ieG1wLmRpZDo3ODhBMkEyNEQwMjUxMUU3QTBBRUM4NzlCNjJCQUIxRCIvPiA8L3JkZjpEZXNjcmlwdGlvbj4gPC9yZGY6UkRGPiA8L3g6eG1wbWV0YT4gPD94cGFja2V0IGVuZD0iciI/Pv/bAEMAAwICAgICAwICAgMDAwMEBgQEBAQECAYGBQYJCAoKCQgJCQoMDwwKCw4LCQkNEQ0ODxAQERAKDBITEhATDxAQEP/AAAsIA4QDhAEBEQD/xAAeAAEBAQEBAAMBAQEAAAAAAAAABQYJCAMECgcCAf/EAFkQAQAAAAkGCAkHCAgFBAIDAAABBRVEZIKiweECAwQHERIGCAkTNVFjkRQZITE4V4Wl00FhaHGmtLUiJXOBssTR5CMoQkNGk5XDF1JUVaEWMmJyJLMzo7H/2gAIAQEAAD8A6piRH03rXJD/ABns9mdHzWXn9IzuRms1m8mHKy8vLyoMnJyYIPPDDDD5oHnPX3yoXFk1JaRpMRxZHef4cx/o+3JytCiCHJzmYzeXB/Zzmkww83B8+7vQwdTwZrS5Y/jH8LM9n9G1dRJwe4FaBl7YM3lZOY8O0uDJ+fOZ38jb9WbgeYOGnG04y+sKHL/9Xa7+F+m5GXDDDDmsmMs5mc1Bt8+zIzcOTkwfqgfzTTY/j2MoYYYx
jrT9K2+fntJy8vb3wvogAAAAAAAAAAAAAAAAAAAAAAA+/oPCGP4shghi2PIw0Td83MaTl5vZ3QwP6RwT42HGU4Ec1k8Gtd3C/Rs3mYduRmc5Gecz+agqZ2HKyYe56Q1Z8rpxheC2ezGj6wYmiDhloORsgzmVlZjwLSocn5s5mvyNv15EL3HqM5Szi3a5dI0eJYyjnP8AAqPc/sycnQ49hyc3mc5lw/2cjSIIebh+be3YYep7bzOezOk5nI0jR87kZ3NZzJgysjLyMqDKycrJh80MEMHkhgf7SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrgBIj6b1rn8E4yXGs1UcWHgxDHPDuNYM/GmkZGVDF0SaLlQZWmablQdWT/AGMjb58vK2QQfPD5HHjjMcfDXdxktKz8WxhGuVwc4Jw5UPMxDFmdysjNZWT8kOfznkys9lfXsyerJgecAAAAAAAAAAAAAAAAAAAAAAAAAAB6a4rnKA69eLFpeYiyLo3yuEvBCDKg57g/GudysvNZGT8sOj5zy5WYyvq25PXkwu0fFj43GqLjU8FYI84ARvBmI10bIyYYziLS8vJydN0HKh68n+3kbfNnMnbkw/ND5H9Uj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuBIl6h28CXqHbweV+PNx5uCvFk4K5uLYtzOYjTh7GmayoYriuHL3sjMZEPk8J0jZshgyIIYPJk+fLhg2eSCCGGDhxrB1h8MtafC2MOHPD2PtJjeOYzzkOcz+k5/K2w/Nk5MHmyciCDyQZMGyCCDzM6AAAAAAAAAAAAAAAAAAAAAAAAAAA0urrWPw11TcMIu4eavuEGlRNHcV52DO6PpOYytn15GVB5svIhg8kOTDtghg87uXxJeO1wY44XBiGKY2ydGiLh9EWYycqM4sycrbkaXkeSCHSdHghh2w5EMMEG9k+WHIhhg88EMEL1DINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gS9Q7eCQ/jfGs4yXBjiw6qNO4dxzDm9IjTPwZWixJF0OVsytN0yGD8mDr3Mn/wB2XD8kEHXDA4F6w9YPC3WnwyjTh7w5jjPRnHMb5/Kz+k5/OQ/LD5snJg/s5GTBsggyYPJBBBBAzoAAAAAAAAAAAAAAAAAAAAAAAAAAANLq41i8MNU3DWKtYPAOOs9FcdxNn8nP6NpGah6vPkZUHmysjKg2wZWTD5IYIYYH6GOKPxnOCvGp1RaBw/iSHN6NGuYgydEj2LIMrblaDpsGT+Vk9e5lf+7Ih+XJh64IX9rZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRB/jPZ7NaPmcvSM/nMnN5rNZMOXl5eVDsgycmCDbDDDD8kGxwX4+HGY0rjJa7tOjCLdKy4eCfBzKy4siHM7fycrNZOV/SaRs/5s5lQbf/rBkQfI84AAAAAAAAAAAAAAAAAAAAAAAAAAAAD01yf3Gj0vixa9dAjGM9Ly4OCHCXKzcVcIMzt/JyM1lZX9HpMEH/NmsqHe/wDrDlwfK/QXmc9mdJzOb0jR87k5zNZ3Jgy8jLyYduTlZMMG2CGCH5YIYGUFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkQecOVC196RqS4skZRZEem+Dx/wAOc7IGhZWTlbMvN5jLyYYdJzmT9WbghydvyQ5yBwUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdyuTT156Rrl4t0XRbHWmc/HvArOyFpmVlZW3LzmZyMmCHR85D9ebhgydvyw5uF7eEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrgcT+WP1
pZ7hZxj4t1daNpcOXoHAqJs1k5WbgyvyYNL0n+lzkP183zMH6ngcAAAAAAAAAAAAAAAAAAAAAAAAAAAAB7n5IvWZnuC3GFjHV9pGlbmg8MoozuTk5uGH8mHStG/pc3D9e5z0H63cESI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuB+bDjacNIdYXGX1lcLuchy8jTeEemZOahhh27M1m85Dm8iDb82TkZMD+TAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP6zxTOFMPAvjL6tOEUOfhzObzHCXQs1ncuCHzZrO5yDNZffk5cMD9HEvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4EvUO3g/L5H+mwxlHsYxjDlbfCtLz2e29e9lww3vogAAAAAAAAAAAAAAAAAAAAAAAAAAAAK/A/TJO4WxJGEGVu+Cxjo2e29W7ncmG5+l/RcvndGzOc/583k5XfA+RXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEH5hQAAAAAAAAAAAAAAAAAAAAAAAAAAAAB9mLOktE/T5v9qB+mmLuj9F/Q5H7MD7CvEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIPzCgAAAAAAAAAAAAAAAAAAAAAAAAAAAAD7EXdIaL+myP2oH6l4s6N0T9Bm/wBmB9lIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4H5VwAAB1+4h3EO4rOuXis8DdY2sbVpKvCCNfDvC9LlTTMzznN6Zns3kfkZvO5OTBsycjJg8kHyP7dG/JkcSnRuZ5nU/u7+3b+etPh6u2T/Fp8TH1Re+dP+MeLT4mPqi986f8ZovFe8SL1N++4w+MeK94kXqb99xh8ZNjfkyOJTo3M8zqf3d/bt/PWnw9XbJ/i0+Jj6ovfOn/ABjxafEx9UXvnT/jNF4r3iRepv33GHxjxXvEi9TfvuMPjJsb8mRxKdG5nmdT+7v7dv560+Hq7ZP8WnxMfVF750/4x4tPiY+qL3zp/wAZovFe8SL1N++4w+MeK94kXqb99xh8ZNjfkyOJTo3M8zqf3d/bt/PWnw9XbJ/i0+Jj6ovfOn/GPFp8TH1Re+dP+M0XiveJF6m/fcYfGPFe8SL1N++4w+MmxvyZHEp0bmeZ1P7u/t2/nrT4ertk/wAWnxMfVF750/4x4tPiY+qL3zp/xmi8V7xIvU377jD4x4r3iRepv33GHxk2N+TI4lOjczzOp/d39u389afD1dsn+LT4mPqi986f8Y8WnxMfVF750/4zReK94kXqb99xh8Y8V7xIvU377jD4ybG/JkcSnRuZ5nU/u7+3b+etPh6u2T/Fp8TH1Re+dP8AjHi0+Jj6ovfOn/GaLxXvEi9TfvuMPjHiveJF6m/fcYfGTY35MjiU6NzPM6n93f27fz1p8PV2yf4tPiY+qL3zp/xjxafEx9UXvnT/AIzReK94kXqb99xh8Y8V7xIvU377jD4ybG/JkcSnRuZ5nU/u7+3b+etPh6u2T/Fp8TH1Re+dP+MeLT4mPqi986f8ZovFe8SL1N++4w+MeK94kXqb99xh8ZNjfkyOJTo3M8zqf3d/bt/PWnw9XbJ/i0+Jj6ovfOn/ABjxafEx9UXvnT/jNF4r3iRepv33GHxjxXvEi9TfvuMPjJsb8mRxKdG5nmdT+7v7dv560+Hq7ZP8WnxMfVF750/4x4tPiY+qL3zp/wAZovFe8SL1N++4w+MeK94kXqb99xh8ZNjfkyOJTo3M8zqf3d/bt/PWnw9XbJ/i0+Jj6ovfOn/GPFp8TH1Re+dP+M0XiveJF6m/fcYfGPFe8SL1N++4w+MmxvyZHEp0bmeZ1P7u/t2/nrT4ertk/wAWnxMf
VF750/4x4tPiY+qL3zp/xmi8V7xIvU377jD4x4r3iRepv33GHxk2N+TI4lOjczzOp/d39u389afD1ds88ceHiPcWTVBxZOF+sHV9q5kuPor8C8F0qU9LzvN85peazeV+TnM7lZMO3Jy8qDywfK5NAAAPsRd0hov6bI/agfqXizo3RP0Gb/ZgfZSI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuB+VcAAAd7OTIjfwbiU6v8z4Nv7so+Xe2T/P8AzPUvTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293U8tcpvFHg3Ep1gZ7wnf3ZO8m7sn+Y+dwTAAAfYi3yxjosHbZH7UD9QEWx9si7RYPA/7nI/t/wDxg+Z9mXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvAl6h28H5bgAAB3c5NP0MeAPtH7/n3rWIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb3mzlQvQi1hezfxDR3AEAAB9mLOktE/T5v9qB+mmLuj9F/Q5H7MD7CvEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIPzCgAADu5yafoY8AfaP3/PvWsQzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3vNnKhehFrC9m/iGjuAIAAD7MWdJaJ+nzf7UD9NMXdH6L+hyP2YH2FeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkQfmFAAAHf7kvfQi1e+0vxDSHpOPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rnkrlLPQx4fezvv+YcIwAAH2Iu6Q0X9NkftQP1LxZ0bon6DN/swPspEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXA/KuAAAO/3Je+hFq99pfiGkPScfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXPJXKWehjw+9nff8AMOEYAAD7EXdIaL+myP2oH6l4s6N0T9Bm/wBmB9lIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4H5VwAAB3s5Mi
N/BuJTq/zPg2/uyj5d7ZP8/8z1L032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1PLXKbxR4NxKdYGe8J392TvJu7J/mPncEwAAH2It8sY6LB22R+1A/UBFsfbIu0WDwP8Aucj+3/8AGD5n2ZeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28CXqHbwfluAAAHdzk0/Qx4A+0fv+fetYhnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvebOVC9CLWF7N/ENHcAQAAH2Ys6S0T9Pm/2oH6aYu6P0X9DkfswPsK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIg/MKAAAO7nJp+hjwB9o/f8+9axDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8Qzire82cqF6EWsL2b+IaO4AgAAPsxZ0lon6fN/tQP00xd0fov6HI/ZgfYV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRB+YUAAAd/uS99CLV77S/ENIek4+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWueSuUs9DHh97O+/wCYcIwAAH2Iu6Q0X9NkftQP1LxZ0bon6DN/swPspEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXA/KuAAAO/3Je+hFq99pfiGkPScfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXPJXKWehjw+9nff8w4RgAAPsRd0hov6bI/agfqXizo3RP0Gb/ZgfZSI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuB+VcAAAd7OTIjfwbiU6v8z4Nv7so+Xe2T/P8AzPUvTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4H
TfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293U8tcpvFHg3Ep1gZ7wnf3ZO8m7sn+Y+dwTAAAfYi3yxjosHbZH7UD9QEWx9si7RYPA/7nI/t/wDxg+Z9mXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvAl6h28H5bgAAB3c5NP0MeAPtH7/n3rWIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb3mzlQvQi1hezfxDR3AEAAB9mLOktE/T5v9qB+mmLuj9F/Q5H7MD7CvEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIPzCgAADu5yafoY8AfaP3/PvWsQzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3vNnKhehFrC9m/iGjuAIAAD7MWdJaJ+nzf7UD9NMXdH6L+hyP2YH2FeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkQfmFAAAHf7kvfQi1e+0vxDSHpOPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rnkrlLPQx4fezvv+YcIwAAH2Iu6Q0X9NkftQP1LxZ0bon6DN/swPspEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXA/KuAAAO/3Je+hFq99pfiGkPScfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXPJXKWehjw+9nff8AMOEYAAD7EXdIaL+myP2oH6l4s6N0T9Bm/wBmB9lIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4H5VwAAB3s5MiN/BuJTq/zPg2/uyj5d7ZP8/8z1L032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eB
L1Dt4HTfY8zW3tvd1PLXKbxR4NxKdYGe8J392TvJu7J/mPncEwAAH2It8sY6LB22R+1A/UBFsfbIu0WDwP8Aucj+3/8AGD5n2ZeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28CXqHbwfluAAAHdzk0/Qx4A+0fv+fetYhnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvebOVC9CLWF7N/ENHcAQAAH2Ys6S0T9Pm/2oH6aYu6P0X9DkfswPsK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIg/MKAAAO7nJp+hjwB9o/f8+9axDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8Qzirel60dV3AjXNwIjHVzrGiaVeD8a814XonP5zM85zecyc5kfl5vKycqDZlZGTD5IfkfwDxXvEi9TfvuMPjM74tPiY+qL3zp/xjxafEx9UXvnT/AIyhFHJkcSnSee57U/vbmzZ+etPg6+2UvFe8SL1N++4w+MeK94kXqb99xh8ZnfFp8TH1Re+dP+MeLT4mPqi986f8ZQijkyOJTpPPc9qf3tzZs/PWnwdfbKXiveJF6m/fcYfGf6zXJhcSXM5zIzub1O7MrIyoMrJhlvT/ACQweb++f3jN5vIzWbyc1m4NmTkZMGTkwdUED/SvEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIPzCgAADv9yXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcD8q4AAA7/cl76EWr32l+IaQ9Jx9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61zJcJuF3BXgXFmXHXC/hJFkS6Bm4NuVpOn6VkZjNwVsuGCB5u4f8pjxRuAmdytGzXD3SOEukZG2CHNxFoWXpOTtg7TK3c3D+rKhfy7hDy3GqfRc/lZvgxqZ4Uxjm4PNnNL0zR9G21cnfZXP8uNo0Gchg0Xi65zKyPkhznCSDJh7oNHhT9P5bfw3m/6u+5ze3/Eu3bt2Ub5n1PHUfR7+0n8seOo+j39pP5ZX8eP9HL7Tfyx48f6OX2m/ln09P5bfw3m/wCrvuc3t/xLt27dlG+Z9Tx1H0e/tJ/LHjqPo9/aT+WV/Hj/AEcvtN/LHjx/o5fab+WfT0/lt/Deb/q77nN7f8S7du3ZRvmfU8dR9Hv7Sfyx46j6Pf2k/llfx4/0cvtN/LHjx/o5fab+WfT0/lt/Deb/AKu+5ze3/Eu3bt2Ub5n1PHUfR7+0n8seOo+j39pP5ZX8eP8ARy+038sePH+jl9pv5Z9PT+W38N5v+rvuc3t/xLt27dlG+Z9Tx1H0e/tJ/LHjqPo9/aT+WV/Hj/Ry+038sePH+jl9pv5Z9PT+W38N5v8Aq77nN7f8S7du3ZRvmfU8dR9Hv7Sfyx46j6Pf2k/llfx4/wBHL7Tfyx48f6OX2m/ln09P5bfw3m/6u+5ze3/Eu3bt2Ub5n1PHUfR7+0n8seOo+j39pP5ZX8eP9HL7Tfyx
48f6OX2m/ln09P5bfw3m/wCrvuc3t/xLt27dlG+Z9Tx1H0e/tJ/LHjqPo9/aT+WV/Hj/AEcvtN/LHjx/o5fab+WfT0/lt/Deb/q77nN7f8S7du3ZRvmfU8dR9Hv7Sfyx46j6Pf2k/llfx4/0cvtN/LHjx/o5fab+WfT0/lt/Deb/AKu+5ze3/Eu3bt2Ub5n1PHUfR7+0n8seOo+j39pP5ZX8eP8ARy+038sePH+jl9pv5Z9PT+W38N5v+rvuc3t/xLt27dlG+Z9Tx1H0e/tJ/LHjqPo9/aT+WV/Hj/Ry+038sePH+jl9pv5Z9PT+W38N5v8Aq77nN7f8S7du3ZRvmfU8dR9Hv7Sfyx46j6Pf2k/llfx4/wBHL7Tfyx48f6OX2m/ln09P5bfw3m/6u+5ze3/Eu3bt2Ub5n1PHUfR7+0n8seOo+j39pP5ZX8eP9HL7Tfyx48f6OX2m/ln09P5bfw3m/wCrvuc3t/xLt27dlG+Z9Tx1H0e/tJ/LHjqPo9/aT+WV/Hj/AEcvtN/LHjx/o5fab+WfT0/lt/Deb/q77nN7f8S7du3ZRvmfBmOWn0eHL2aTxfs5k5HXkcIoMqHuh0eBp4h5ZrVbpWeyc3wk1QcJovzcPnzmi6ZmNJ2fqytx6M1ecqPxOdYGdyNFzusLSODGk5eyCDNx/oOXo2Tth7XJ3s3B+vKgenOC3DHglw4ivNx3wN4TRXHkX52DbkaTF2l5vSM1DWyIYYH/AGPpvWuSBrgflXAAAHezkyI38G4lOr/M+Db+7KPl3tk/z/zPUvTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eDKayteWr3U/wZz/C/WTH+gRFFeYgh/pdKz+yHOZX/ACZvI2b2cyofkycmCGFzC4y3LD8KeE2c0jg5xeODOREWg5MOXm4I+jTJgzul52CHyb2azH/szfm8kOVvQ/NA5+8PNZ2sTWhGuVHesPhrHHCHTcqGGHnYw0vLzu782Tkww7MmD5oIIIGYAAAAAAAAAAAAAAAAAAAAAAAAAGp1f61NZGqqN8iPdXHDiOuDmnZEMEPOxfpeXmd7Z8mVkwQ7uXB82VBDA9/8X3lhOFWhZegcGuMjEGTHOg5EMGbh4QRTmcnN6XkZMOyDez2Yg2ZGd2bPLDkbsPzQunerDh3q51zcFtH4Z6suHWgR/FOkQQf02i5W3KzWV/yZzIhh3s3lwfLk5UEELXSDTLGJL1Dt4EvUO3gS9Q7eD8twAAA7ucmn6GPAH2j9/wA+9axDOKt6uMiK8QzirerjIivEM4q3q48hccHjr8AeKrweg0fO5OajzhpGGahyotiPN53ZDBB8me0iGDy5vNQQ/ryvNB8sMHFXXZr51ncYLhfnuGeszhFnow0nKhhg0bRsmGHI0bQ83t8mbzOb82Rk/wDmHzwwwwv58AAAAAAAAAAAAAAAAAAAAAAAAAAD+jai+MFrT4unDPMcNtV3CXPRdpWTDBBpOi5UMOXoum5rb5c3n815svJ/8weeCGCF3I4mPHm1e8bbg5Do2ayczEPDiLs1BlRnEOcz22GGD5c/o8MPlzmahhrZPmh+SGH+2A/MKAAAO7nJp+hjwB9o/f8APvWsQzirerjIivEM4q3q4yIrxDOKt6u8y8ebjncHOKTq9gzujZOZjHhxH2bzmbiGLMqHbBBDB5IdJz0HngzWRDDB5P7WV+TB8sMHAvhpw04Uaw+FEY8M+GcdaTGscxrn8rSNK0rSMuHKysvKhh/8QQeaCCDyQQQQQQIoAAAAAAAAAAAAAAAAAAAAAAAAAAAucCOG/CvVvwri3htwJjvSoojqKM/k6Roml6Plw5OXkZcEP/mCHzQwQ+SGCGGCHyO5/Eo44PB7jVcAYc7pGTmYv4aRHm8jNx5FuTDsghhh8kGk5qDzw5rLhgqw+SH5IYfR4/MKAAAO/wByXvoR
avfaX4hpD0nH03rXJA1wkR9N61yQNcJEfTetc/n2tTWVwZ1Qavo81kcL9LgzEVxFomXpWd8v5WcyoPJkZvI68rLyoYMmCDrhgfnw1867OF/GC1nRvrM4Z6TDlaTGGc3dG0aDKhhzeh6Nkw/0eYzcHyZOTB3wwwww+WF/PgAAAAAAAAAAAAAAAAAAAAAAAAAAAH9G4vuvThnxdNacT60eBOlQ5OlRdnd3StGhyoYM1pui5UMHO5jOQfLk5UHdDBBDB5YH6NNUus/gtrm1cxDrN4GaZBpEUx/oeRpWZ8v5WayofJl5rL6svIyoMrJhg64IWuflXAAAHf7kvfQi1e+0vxDSHpOPpvWuSBrhIj6b1rkga4SI+m9a5yM5XjjD5cc8KIp4vHByMIfAolyciNI+gzeX5M5peXB/QZnK2efcyId/Z15yDqc4QAAAAAAAAAAAAAAAAAAAAAAAAAAAAdM+Rs4yGciThXHHFu4SxlD4DHmTlxtwfgzmX5M3pmRB/T5nJ2+bfyIN+CDrzcPywuur8q4AAA72cmRG/g3Ep1f5nwbf3ZR8u9sn+f8Amepem+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28EfhjwpiuIuCUc8Mo7z0GiRfwd0DPxjpWXD5duazebhy8rq+TIfmt1ncPI11oaxOEesOO85lZem8IYyz+n53bDt3d/Lhhycn6snJ2ZMHzQQMwAAAAAAAAAAAAAAAAAAAAAAAAAAAADU6q9YEb6qtZHBrWREWcysnTuDkZ6PGOa2Q7N7m8uCHKyIfmysnbkw/NDC/SjwR1iRZw04KxPwviXMc5oEdaDmNP0bKgy/Pm87kQZeT8nVC/MKAAAO7nJp+hjwB9o/f8+9axDOKt6uMiK8QzirerjIjzHylGsPO6v+Jzw0zWi5zc0nhPn9DiDNwwQ7Id3PZcOVne/NZvOQfrcIQAAAAAAAAAAAAAAAAAAAAAAAAAAAAB3Q5NPWN/614o3BXNadn9ukcHM9pMRZcMMO2HdzOXvZvuzeczcH6nC8AAAd3OTT9DHgD7R+/5961iGcVb1cZEV4hnFW9XGRHPDlmeEGf0XVPwD4M5vL2ZuMY/z2l5yDb5+ZzEOTk//uhclAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB6/wCKJxh461Was9P4NaFn4cnNZyOs/pcEG/s2b2ZzGTD+w8gAAADv9yXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOV3Li5/OQaPqi0aCH8jKyo4zkP1wQaLBfC5UAAAAAAAAAAAAAAAAAAAAAAAAAAAAACnFnCGMIp0fK0bRMvZkZWXDlw/XsgguTAAAB3+5L30ItXvtL8Q0h6Tj6b1rkga4SI+m9a5IGuHKrlxvPqg+qOv3RysAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHezkyI38G4lOr/M+Db+7KPl3tk/z/wAz1L032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3g5bctvp3hv/CSHm9zcln5du3b4J/By3AAAarVfqv4b65eG8XaudXMTSrwgjXnfBNE5/N5nnObzeVnMv8vOZWTkwbMnIyofLD8j+/8AivuO56nPfcX/ABn1PFp8c/1Qw/6zoPxjxafHP9UMP+s6D8Z82Y5MjjraTt5nU/vbuzb+etAg/wB75ny+K+47nqc99xf8Y8V9x3PU577i/wCM+p4tPjn+qGH/AFnQfjHi0+Of6oYf9Z0H4z5sxyZHHW0nbzOp/e3dm389aBB/vfM+XxX3Hc9TnvuL/jHivuO56nPfcX/GfU8Wnxz/AFQw/wCs6D8Y8Wnxz/VDD/rOg/GfNmOTI462k7eZ1P727s2/nrQIP975ny+K+47nqc99xf8AGPFfcdz1Oe+4v+M+p4tPjn+qGH/WdB+MeLT45/qhh/1nQfjPmzHJkcdb
SdvM6n97d2bfz1oEH+98z5fFfcdz1Oe+4v8AjHivuO56nPfcX/GfU8Wnxz/VDD/rOg/GPFp8c/1Qw/6zoPxnzZjkyOOtpO3mdT+9u7Nv560CD/e+Z8vivuO56nPfcX/GPFfcdz1Oe+4v+M+p4tPjn+qGH/WdB+MeLT45/qhh/wBZ0H4z5sxyZHHW0nbzOp/e3dm389aBB/vfM+XxX3Hc9TnvuL/jHivuO56nPfcX/GfU8Wnxz/VDD/rOg/GPFp8c/wBUMP8ArOg/GfNmOTI462k7eZ1P727s2/nrQIP975ny+K+47nqc99xf8Y8V9x3PU577i/4z6ni0+Of6oYf9Z0H4x4tPjn+qGH/WdB+M+bMcmRx1tJ28zqf3t3Zt/PWgQf73zPl8V9x3PU577i/4x4r7juepz33F/wAZ9TxafHP9UMP+s6D8Y8Wnxz/VDD/rOg/GfNmOTI462k7eZ1P727s2/nrQIP8Ae+Z8vivuO56nPfcX/GPFfcdz1Oe+4v8AjPqeLT45/qhh/wBZ0H4x4tPjn+qGH/WdB+M+bMcmRx1tJ28zqf3t3Zt/PWgQf73zPl8V9x3PU577i/4x4r7juepz33F/xn1PFp8c/wBUMP8ArOg/GPFp8c/1Qw/6zoPxnzZjkyOOtpO3mdT+9u7Nv560CD/e+Z8vivuO56nPfcX/ABjxX3Hc9TnvuL/jPqeLT45/qhh/1nQfjHi0+Of6oYf9Z0H4z5sxyZHHW0nbzOp/e3dm389aBB/vfM+XxX3Hc9TnvuL/AIx4r7juepz33F/xn1PFp8c/1Qw/6zoPxjxafHP9UMP+s6D8Z82Y5MjjraTt5nU/vbuzb+etAg/3vmfL4r7juepz33F/xjxX3Hc9TnvuL/jPqeLT45/qhh/1nQfjP4RrL1acNNUHDSMNX2sGJ5Lj6K+b8K0Xns3neb5zN5Ocyfys3DlZMO3Jy8mHyQ/KzAAAAAAADu5yafoY8AfaP3/PvWsQzirerjIivEM4q3q4yI5h8tR5tVH1xx+6uYIAAD1VyXvpu6vfaX4fpDv8yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMi4R8pZ6Z3D72d9wzDzCAAAAAAA7ucmn6GPAH2j9/z71rEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/ADIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yLhHylnpncPvZ33DMPMIAAAAAADv9yXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAAAAAO/wByXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAAAAAO9nJkRv4NxKdX+Z8G392UfLvbJ/n/mepem+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uo
kGmWMSQaZYxJeodvAl6h28HLblt9O8N/4SQ83ubks/Lt27fBP4OW4AAD1JyZGj+E8dbV/md/c3pR8uzbMM+72yDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3g4JcpvpHhPHW1gZ7c3N6TvJt2zDMPLYAAAAAADu5yafoY8AfaP3/PvWsQzirerjIivEM4q3q4yI5h8tR5tVH1xx+6uYIAAD1VyXvpu6vfaX4fpDv8AMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAAAAAO7nJp+hjwB9o/f8+9axDOKt6uMiK8QzirerjIjmHy1Hm1UfXHH7q5ggAAPVXJe+m7q99pfh+kO/zIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yLhHylnpncPvZ33DMPMIAAAAAADv9yXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAAAAAO/3Je+hFq99pfiGkPScfTetckDXCRH03rXJA1w5VcuN59UH1R1+6OVgAAD09yafpncAfaP3DPu7jXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1zgDyoXpu6wvZv4fo7yqAAAAAAA72cmRG/g3Ep1f5nwbf3ZR8u9sn+f+Z6l6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwctuW307w3/hJDze5uSz8u3bt8E/g5bgAAPUnJkaP4Tx1tX+Z39zelHy7Nswz7vbINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293
USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eDglym+keE8dbWBntzc3pO8m3bMMw8tgAAAAAAO7nJp+hjwB9o/f8+9axDOKt6uMiK8QzirerjIjmHy1Hm1UfXHH7q5ggAAPVXJe+m7q99pfh+kO/zIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yLhHylnpncPvZ33DMPMIAAAAAADu5yafoY8AfaP3/AD71rEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/MiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAAAAAO/wByXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAAAAAO/3Je+hFq99pfiGkPScfTetckDXCRH03rXJA1w5VcuN59UH1R1+6OVgAAD09yafpncAfaP3DPu7jXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1zgDyoXpu6wvZv4fo7yqAAAAAAA72cmRG/g3Ep1f5nwbf3ZR8u9sn+f8Amepem+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28HLblt9O8N/4SQ83ubks/Lt27fBP4OW4AAD1JyZGj+E8dbV/md/c3pR8uzbMM+72yDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3g4JcpvpHhPHW1gZ7c3N6TvJt2zDMPLYAAAAAADu5yafoY8AfaP3/AD71rEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/MiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrx
DOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAAAAAO7nJp+hjwB9o/f8+9axDOKt6uMiK8QzirerjIjmHy1Hm1UfXHH7q5ggAAPVXJe+m7q99pfh+kO/zIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yLhHylnpncPvZ33DMPMIAAAAAADv9yXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAAAAAO/3Je+hFq99pfiGkPScfTetckDXCRH03rXJA1w5VcuN59UH1R1+6OVgAAD09yafpncAfaP3DPu7jXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1zgDyoXpu6wvZv4fo7yqAAAAAAA72cmRG/g3Ep1f5nwbf3ZR8u9sn+f+Z6l6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwctuW307w3/AISQ83ubks/Lt27fBP4OW4AAD1JyZGj+E8dbV/md/c3pR8uzbMM+72yDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3g4JcpvpHhPHW1gZ7c3N6TvJt2zDMPLYAAAAAADu5yafoY8AfaP3/PvWsQzirerjIivEM4q3q4yI5h8tR5tVH1xx+6uYIAAD1VyXvpu6vfaX4fpDv8yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMi4R8pZ6Z3D72d9wzDzCAAAAAAA7ucmn6GPAH2j9/z71rEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/ADIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yLhHylnpncPvZ33DMPMIAAAAAADv9yXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wk
R9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAAAAAO/wByXvoRavfaX4hpD0nH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAAAAAO9nJkRv4NxKdX+Z8G392UfLvbJ/n/mepem+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28HLblt9O8N/4SQ83ubks/Lt27fBP4OW4AAD1JyZGj+E8dbV/md/c3pR8uzbMM+72yDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3g4JcpvpHhPHW1gZ7c3N6TvJt2zDMPLYAAAAAADu5yafoY8AfaP3/PvWsQzirerjIivEM4q3q4yI5h8tR5tVH1xx+6uYIAAD1VyXvpu6vfaX4fpDv8AMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAAAAAOsvEe48PFk1QcWTghq+1g6xpLj6K/DfCtFkzS87zfOaXnc5k/lZvNZWTDtycvJh8kPyvQ8UcpvxKdG57ntcG7v7Nn5l0+Hr7FS8aFxIvXJ7kjD4J40LiReuT3JGHwWd8ZZxMfW77m0/4J4yziY+t33Np/wVCKOU34lOjc9z2uDd39mz8y6fD19ipeNC4kXrk9yRh8E8aFxIvXJ7kjD4L+q6tNZfAvW/wLi/WDq+jeVIhjTnPBdK5nOZrnObzmVm8r8nOZOTlQbMrIyoPLB8jTuYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/ADIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yLhHylnpncPvZ33DMPMIAAAAAAAAADv9yXvoRavfaX4hpD1U5VcuN59UH1R1+6OVgAAD09yafpncAfaP3DPu7jXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1zgDyoXpu6wvZv4fo7yqAAAAAAAAAA7/AHJe+hFq99pfiGkPVTlVy43n1QfVHX7o
5WAAAPT3Jp+mdwB9o/cM+7uNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXOAPKhem7rC9m/h+jvKoAAA+/H+hSdHsZRfu7vgul57M7Ordy4YLn0AAAAAAHezkyI38G4lOr/ADPg2/uyj5d7ZP8AP/M9Sy9Q7eDlty2+neG/8JIeb3NyWfl27dvgn8HLcAAB6k5MjR/CeOtq/wAzv7m9KPl2bZhn3e2QaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28HBLlN9I8J462sDPbm5vSd5Nu2YZh5bAAAH9V41nBGHgLxkNY/BeDMw5rN6Jwi0zLzWTDBs/os5nIc5kQ/rycuCF/KgAAAAAHdzk0/Qx4A+0fv+fennMPlqPNqo+uOP3VzBAAAequS99N3V77S/D9Id/mRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkXCPlLPTO4fezvuGYeYQAAB7n5XTVnnuC3GFi7WDo+i7mg8MoozWVlZyCD8mHStG/os5B9e5zMP63hgAAAAAB3c5NP0MeAPtH7/n3p5zD5ajzaqPrjj91cwQAAHqrkvfTd1e+0vw/SHf5kRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZFwj5Sz0zuH3s77hmHmEAAAd6+VC1CaRrt4skZRnEeheER/wGzsv6Fk5OTty85mMjJhg0nN5P15uGHK2fLDm4HBQAAAAAB3+5L30ItXvtL8Q0h6qcquXG8+qD6o6/dHKwAAB6e5NP0zuAPtH7hn3dxrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga5wB5UL03dYXs38P0d5VAAAH6pc9mczpOZzmj6RmsnOZrO5MORl5GVBtycrJhg2QwQwfLBDA/PpygPFc0vixa9dPi6LNEy4OCHCXKzka8H89s/JyM1lZX9Jo0MP/NmsqHd/+sORD8rzKAAAAADv9yXvoRavfaX4hpD1U5VcuN59UH1R1+6OVgAAD09yafpncAfaP3DPu7jXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1zgDyoXpu6wvZv4fo7yqAAAP1Iy9Q7eD+Kc
bfUDwU41OqPT+AEeaFm9FjXMb2lxFGcPly9B02CD8nK822HIyv8A25eT8sEPXBA/P3rC1fcLdVnDKNOAXDiJ89FkcxRn8rMaRmM5B1ebLyYf7WRlQbIcnKg8kMEMELOgAAAADvZyZEb+DcSnV/mfBt/dlHy72yf5/wCZ6ll6h28HLblt9O8N/wCEkPN7m5LPy7du3wT+DluAAA9ScmRo/hPHW1f5nf3N6UfLs2zDPu9sg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4OCXKb6R4Tx1tYGe3Nzek7ybdswzDy2AAAP09DzbxyuIzwX42vBPORnFed0eJuH8S5rZFUZ5WR+RpOR+VD4NpGzyw5EMPmyvPkQw7fLBDDBDw81jauOGuqbhjGPALWDwf0qJo7ivOw5rSNGz+Tsh+bLyYfNl5EMHlgyoNsEMHmZoAAAAB3c5NP0MeAPtH7/n3p5zD5ajzaqPrjj91cwQAAHqrkvfTd1e+0vw/SHf5kRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZFwj5Sz0zuH3s77hmHmEAAAfp6FeIZxVvfyvjOcUfVFxqeCsMScP4ogzEa6NkZUEWR7omRk5Om6DlQ9WV/byNvnzeVtyYfmh8ri5xo+T+168WLS8/GcYxRlcJeCEGVDzPCCKs1lZeayMn5INIzflysxlfXtyerKheZQAAAAd3OTT9DHgD7R+/596ecw+Wo82qj644/dXMEAAB6q5L303dXvtL8P0h3+ZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRcI+Us9M7h97O+4Zh5hAAAH6qBIj6b1rkh/jPZnM6RmsvMaRmsjO5rOZMOTl5GXkwZWTlQQ+eCGCHzwPOevvkveLJrt0jSY8iyJM/wGj/SNuVlabEEGTm8xnMuH+1nNGhg5uH593dhh63gzWlyOHGP4J57P6Tq6jvg9w10DI2w5vJyc/wCA6XDk/Pm87+Rt+rOQvMXDLik8ZngBlZcHCvUbwx0PIyIYYIc7kRZnM/modnVl5qDKyYf1Qv5zpfBLhVF+2DT+DMbaNs8/PaFnMjZ3wPpSdGH/AEGkf5WV/B/2TIy/7fpP+VlfwJMjL/t+k/5WV/B/yTYxg8+gaT/lZX8CTow/6DSP8rK/gSdGH/QaR/lZX8H/AGTIy/7fpP8AlZX8CTIy/wC36T/lZX8H/JNjGDz6BpP+VlfwJOjD/oNI/wArK/gSdGH/AEGkf5WV/B345MLNZzM8SXV7m87m8rIyoJS25OVBshg//P0j5Hqhyq5cbz6oPqjr90crAAAHp7k0/TO4A+0fuGfd3GuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rk
ga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrnAHlQvTd1hezfw/R3lUAAAfqoEiPpvWuSBrhIj6b1rkTL0XRs7/APyaPmsv/wC2RBC+OTov/wCg0f8Aysn+DWyZFv8A2/Rv8rJ/gSZFv/b9G/ysn+CRH0WxdB4Ps0DRv7X91k/N8yTJ0X/9Bo/+Vk/wJOi//oNH/wArJ/g1smRb/wBv0b/Kyf4EmRb/ANv0b/Kyf4JEfRbF0Hg+zQNG/tf3WT83zJMnRf8A9Bo/+Vk/wJOi/wD6DR/8rJ/g2ebzWbzORBm8zm8nIyIPNk5MGyCD9T/blVy43n1QfVHX7o5WAAAPT3Jp+mdwB9o/cM+7uNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXOAPKhem7rC9m/h+jvKoAAA/UjL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eDlty2+neG/wDCSHm9zcln5du3b4J/By3AAAepOTI0fwnjrav8zv7m9KPl2bZhn3e2QaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28HBLlN9I8J462sDPbm5vSd5Nu2YZh5bAAAH6ehXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEcw+Wo82qj644/dXMEAAB6q5L303dXvtL8P0h3+ZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRcI+Us9M7h97O+4Zh5hAAAH6ehXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEcw+Wo82qj644/dXMEAAB6q5L303dXvtL8P0h3+ZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRcI+Us9M7h97O+4Zh5hAAAH6qBIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4cquXG8+qD6o6/dHKwAAB6e5NP0zuAPtH7hn3dxrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga5wB5UL03dYXs38P0d5VAAAH6qBIj
6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4cquXG8+qD6o6/dHKwAAB6e5NP0zuAPtH7hn3dxrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga5wB5UL03dYXs38P0d5VAAAH6kZeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwctuW307w3/hJDze5uSz8u3bt8E/g5bgAAPUnJkaP4Tx1tX+Z39zelHy7Nswz7vbINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eDglym+keE8dbWBntzc3pO8m3bMMw8tgAAD9PQrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/MiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAD9PQrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/MiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAD9VAkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAD9VAkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAD9SMvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293U
SDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4OW3Lb6d4b/wkh5vc3JZ+Xbt2+CfwctwAAHqTkyNH8J462r/ADO/ub0o+XZtmGfd7ZBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwcEuU30jwnjrawM9ubm9J3k27ZhmHlsAAAfp6FeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRzD5ajzaqPrjj91cwQAAHqrkvfTd1e+0vw/SHf5kRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZFwj5Sz0zuH3s77hmHmEAAAfp6FeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRzD5ajzaqPrjj91cwQAAHqrkvfTd1e+0vw/SHf5kRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1cZFwj5Sz0zuH3s77hmHmEAAAfqoEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhyq5cbz6oPqjr90crAAAHp7k0/TO4A+0fuGfd3GuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrnAHlQvTd1hezfw/R3lUAAAfqoEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhyq5cbz6oPqjr90crAAAHp7k0/TO4A+0fuGfd3GuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrnAHlQvTd1hezfw/R3lUAAAfqRl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvBy25bfTvDf8AhJDze5uSz8u3bt8E/g5bgAAPUnJkaP4Tx1tX+Z39zelHy7Nswz7vbINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLG
JINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eDglym+keE8dbWBntzc3pO8m3bMMw8tgAAD9PQrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/MiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAD9PQrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiOYfLUebVR9ccfurmCAAA9Vcl76bur32l+H6Q7/MiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIuEfKWemdw+9nfcMw8wgAAD9VAkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA9Pcmn6Z3AH2j9wz7u41wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNc4A8qF6busL2b+H6O8qgAAD9VAkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcOVXLjefVB9UdfujlYAAA0+rTWXw01QcNIv1g6vo3kuPor5zwXSuZzed5vnM3lZvK/JzmTlZMO3Jy8qDywfK/u/jLOOd63fc2gfBfc8aFx3fXJ7ki/wCCeNC47vrk9yRf8F8Okcpvx1tJ3ee1wb255vzLoEH+y+HxlnHO9bvubQPgnjLOOd63fc2gfBfc8aFx3fXJ7ki/4J40Lju+uT3JF/wXw6Rym/HW0nd57XBvbnm/MugQf7L4fGWcc71u+5tA+CeMs453rd9zaB8F9zxoXHd9cnuSL/gnjQuO765PckX/AAXw6Rym/HW0nd57XBvbnm/MugQf7L4fGWcc71u+5tA+CeMs453rd9zaB8F9zxoXHd9cnuSL/gnjQuO765PckX/BfDpHKb8dbSd3ntcG9ueb8y6BB/svh8ZZxzvW77m0D4J4yzjnet33NoHwX3PGhcd31ye5Iv8AgnjQuO765PckX/BfDpHKb8dbSd3ntcG9ueb8y6BB/svh8ZZxzvW77m0D4J4yzjnet33NoHwX3PGhcd31ye5Iv+CeNC47vrk9yRf8F8Okcpvx1tJ3ee1wb255vzLoEH+y+HxlnHO9bvubQPgnjLOOd63fc2gfBfc8aFx3fXJ7ki/4J40Lju+uT3JF/wAF8Okcpvx1tJ3ee1wb255vzLoEH+y+HxlnHO9bvubQPgnjLOOd63fc2gfBfc8aFx3fXJ7ki/4J40Lju+uT3JF/wXw6Rym/HW0nd57XBvbnm/MugQf7L4fGWcc71u+5tA+CeMs453rd9zaB8F9zxoXHd9cnuSL/AIJ40Lju+uT3JF/wXw6Rym/HW0nd57XBvbnm/MugQf7L4fGWcc71u+5tA+CeMs453rd9zaB8F9zxoXHd9cnuSL/gnjQuO765PckX/BfDpHKb8dbSd3ntcG9ueb8y6BB/svh8ZZxzvW77m0D4
J4yzjnet33NoHwX3PGhcd31ye5Iv+CeNC47vrk9yRf8ABfDpHKb8dbSd3ntcG9ueb8y6BB/svh8ZZxzvW77m0D4J4yzjnet33NoHwX3PGhcd31ye5Iv+CeNC47vrk9yRf8F8Okcpvx1tJ3ee1wb255vzLoEH+y+HxlnHO9bvubQPgnjLOOd63fc2gfBfc8aFx3fXJ7ki/wCCeNC47vrk9yRf8F8Okcpvx1tJ3ee1wb255vzLoEH+y+HxlnHO9bvubQPgnjLOOd63fc2gfBfc8aFx3fXJ7ki/4Lz/AK0daPDfXNw3jHWNrGjmVeEEa814XpfMZvM85zebyc3kfkZvJycmDZk5GTB5IPkZUAAAfqRl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvA6b7Hma29t7uokGmWMSQaZYxJeodvAl6h28DpvseZrb23u6iQaZYxJBpljEl6h28CXqHbwOm+x5mtvbe7qJBpljEkGmWMSXqHbwJeodvBy25bfTvDf8AhJDze5uSz8u3bt8E/g5bgAAAAAAAAAAAAAAAAAAAAAAAAAAAAA/T0K8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIjmPy0+YzkOjaqtKgg/Iyc5G2bh+uGDRoYP/wDIXL0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAUotiHT40zGVpGi5G3IycuHIh+vZBDe/TQK8QzirerjIivEM4q3q4yIrxDOKt6uMiK8QzirerjIjnhyzPB/P6Vqn4B8Js3kbc3F0f57RM5Ds83PZiHKyf/wBMLkoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPX/FE4vEda09Wen8JdCzEOVms3HWf0SCHc27d3M5jKh/bd6RIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4eQOVP4AZ3h3xRuEOd0bN7+kcGs9mI9zcEEG2HdzOXBk5z/+vOZyH9TgsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAO+PJe6tf8A0NxOeCWd0/MQQaTwmz2lR/lwQwbId3PZe7m+/NZvNw/retxIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkga4ZHWZwZizhpwVjDghHOagzmgR3oOkxfpOTD8ubzuRuZX/AIyn5q9Z3AONdV+sPhHq8jvN5WRpvB6Ms/oGd2wbN7m8uGDJyoPmysnZlQfNDAzIAAAAAAAAAAAAAAAAAAAAAAAAAAAANTqr1fxvrV1kcGtW8RZvKy9O4Rxno8XZrZBt3ecy4IMrLh+bJyduVD80EL9M/A7gtFfAfglEvA2JM1BmoviPQMxF2jZEHyZrNZuDIyf/ABkv+y9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293USDTLGJINMsYkvUO3gS9Q7eB032PM1t7b3dRINMsYkg0yxiS9Q7eBL1Dt4HTfY8zW3tvd1Eg0yxiSDTLGJL1Dt4EvUO3gdN9jzNbe293U5M8sNxZs9wa4SxPxi+Dehw5zQI7gyIqj/ACs3m9kGa0vIg/oM9lbP+fIg3NvXm4OtzUAAAAAAAAAAAAAAAAAAAAAAAAAAAAB0m5HrUDDp3C6NuMbwkimHOaFEmRlxVEEOcg2ZOXpeXBsz+eydsHl3MiHcgh685D1OtcvUO3gkCvEM4q3q4yIrxDOKt6uMiK8QzirerjIivEM4q3q4yIrxDOKt6Rra1YcFtc2rmPtWXDPQ4NIimP8AQ8vRc95Pys1lQ+XIzuR1ZeRlQZOVBD1wQPzl8YLUXwz4umtOONV3DbRYcnSouzu9oukwZMMGa03RcqGHms/m4flycqDuhghgh8sD+cgAAAAAAAAAAAAAAAAAAAAAAAAAAAP6DqG1J8L+MFrOijVnwM0aHK0mMM5vaTpMOTDDm9D0bJh/pM/nIfkycmDvhhggg8sL9B+qvVrwZ1Qavoj1b8ENEgzEVxFomRoua8n5Wcyo
PLl5zL68rLyoYcqGHrhhaoFeIZxVvVxkRXiGcVb1cZEV4hnFW9XGRFeIZxVvVxkRXiGcVb1d5l483Ex4OcbbV7BmtGyszF3DiIc3nM5EMZ5UGyCGGHyw6NnofPDmsuGCDy/2cr8qD5YIeBfDTgXwo1ecKIx4GcM4l0mKo5irP5Wj6VoukZEOTlZGVBD/AOYIfPBDB5IYIYIYEUAAAAAAAAAAAAAAAAAAAAAAAAAAAXOBHAjhXrI4VxbwJ4ExJpUbx1G+fydH0TRNHyIcrLy8uGH/AMQQeeGGHyQQQQww+R3P4lHE+4PcVXgDDmtIyszGHDSPM3kZyPIyyYNsEEMHlg0bNQ+eDNZEMNaHyw/JBB6PGuEiPpvWuSBrhIj6b1rkga4SI+m9a5IGuEiPpvWuSBrhIj6b1rkgfxPjncRnV7xtuDkGk53KzMQ8OIuzUOTFkfZvM7YYYPkzGkQQeXOZqGGtk+eD5YIeG+vTi+60+Lpwzz/AnWjwaz0XaVkwww6NpWTBDl6Lpua2+TOZjO+bLyYe+DzQwQQv5yAAAAAAAAAAAAAAAAAAAAAAAAAAD+g6k9Q2s3jBcMMzwM1Z8Hc9GGk5UMEOk6TlQQ5GjaHm9vlzmeznmyMn/wAw+aCCGF2q4n3Eo4A8VXg9DpGays1HnDSMM1BkxlHmczWyGCD5czo8EPlzeagh/XleeH5IIPXoAkR9N61yQNcJEfTetckDXCRH03rXJA1wkR9N61yQNcJEfTetckDXMjrP1S6udc3BbSOBms3gjF8fxTpEEP8AQ6VmtuVmsr/nzeXB+Vm8uD5MrJhghcsOMzyPPCXg1ntI4ScXThJkx3oGchy85kxBGucgzel5qCDy7uaz/kyM75/JBl7sPzwufXDzVjrD1XxrlxJrD4Fxxwe03Ihhg5rT9Ey81vfPk5UMGzKg+eCGGBmQAAAAAAAAAAAAAAAAAAAAAAAABqdX+qvWRrVjfIiLVvwHjrhHp2XDBBzUXaHl57d2/LlZUEG7kQfPlQwQPfuoHkeuF2nQ6Bwk4xvCHNxJoWchgzkMQRVnIM7peXkwbId3O5+DbkZvbt8sGRvQ/PA6VatdVer7VBwZzHBDVvwU0CIorzEEH9Foub2ZWcyv+fOZcP5WcyoflysqGGFqmuB/zZB1GyDqSY9/uvm2/r838Ej9cPefrh72t2QdRsg6kmPf7r5tv6/N/BI/XD3n64e9rdkHUbIOpJj3+6+bb+vzfwSP1w95+uHva3ZB1GyDqSY9/uvm2/r838Ej9cPefrh72t2QdRsg6kmPf7r5tv6/N/BI/XD3n64e9rdkHUbIOpJj3+6+bb+vzfwZLhNwR4K8NIsy4m4X8HItjvQM55MrRow0XIz+bhq5cEMDzVrF5NPijcNec07NcANI4OaRDthhy4i07L0bJ2/o8rezcH6slz94yPE31W6pY8z2g8GI44TZzM5MOVsg0vSsxnNmz58nM5LyLwhizR4pjDK0TRsvOZWRB8uXDBDD/wCIIEwAAAAAAAAAAAAAAAAAAAAAAAUohi3MRpp+ToukZecyciH5ciGCCH/zBC9W8Xjiias9acdZrQuEsbcI83msqHJ2waJpWZyNu358rM5Tplq15L3ic8Bua0/O6u9I4TaTBshgy4/0/L0rJ2/o8ndzcP68l6e4LcDeCXAeK83EnA3gxFcRxfmoNmRo0X6Jm9HzcFXIgggf6j3+6+bb+vzfwSP1w95+uHva3ZB1GyDqNkHU/9k= 89 | mediatype: image/png 90 | install: 91 | spec: 92 | deployments: null 93 | strategy: "" 94 | installModes: 95 | - supported: false 96 | type: OwnNamespace 97 | - supported: false 98 | type: SingleNamespace 99 | - supported: false 100 | type: 
MultiNamespace 101 | - supported: true 102 | type: AllNamespaces 103 | keywords: 104 | - Capacity Management 105 | - Node Autoscaling 106 | - Proactive Autoscaling 107 | - Elastic Capacity 108 | links: 109 | - name: Proactive Node Scaling Operator 110 | url: https://github.com/redhat-cop/proactive-node-scaling-operator 111 | - name: Container Image 112 | url: https://quay.io/redhat-cop/proactive-node-scaling-operator 113 | maintainers: 114 | - email: rspazzol@redhat.com 115 | name: Raffaele Spazzoli 116 | maturity: alpha 117 | provider: 118 | name: Red Hat Community of Practice 119 | version: 0.1.0 120 | -------------------------------------------------------------------------------- /config/manifests/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - ../default 3 | - ../samples 4 | - ../scorecard 5 | -------------------------------------------------------------------------------- /config/operatorhub/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: Subscription 3 | metadata: 4 | name: proactive-node-scaling-operator 5 | spec: 6 | channel: alpha 7 | installPlanApproval: Automatic 8 | name: proactive-node-scaling-operator 9 | source: community-operators 10 | sourceNamespace: openshift-marketplace 11 | --- 12 | apiVersion: operators.coreos.com/v1 13 | kind: OperatorGroup 14 | metadata: 15 | name: proactive-node-scaling-operator 16 | spec: 17 | targetNamespaces: [] -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/prometheus/kustomizeconfig.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | varReference: 3 | - path: spec/endpoints/tlsConfig/serverName 4 | kind: ServiceMonitor -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | operator: proactive-node-scaling-operator 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 13 | interval: 30s 14 | port: https 15 | scheme: https 16 | tlsConfig: 17 | caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt 18 | serverName: $(METRICS_SERVICE_NAME).$(METRICS_SERVICE_NAMESPACE).svc 19 | selector: 20 | matchLabels: 21 | operator: proactive-node-scaling-operator 22 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: ["/metrics"] 7 | verbs: ["get"] 8 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | 
-------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | operator: proactive-node-scaling-operator 6 | annotations: 7 | service.alpha.openshift.io/serving-cert-secret-name: proactive-node-scaling-operator-certs 8 | name: controller-manager-metrics 9 | namespace: system 10 | spec: 11 | ports: 12 | - name: https 13 | port: 8443 14 | targetPort: https 15 | selector: 16 | operator: proactive-node-scaling-operator 17 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | # Comment the following 4 lines if you want to disable 7 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 8 | # which protects your /metrics endpoint. 
9 | - auth_proxy_service.yaml 10 | - auth_proxy_role.yaml 11 | - auth_proxy_role_binding.yaml 12 | - auth_proxy_client_clusterrole.yaml 13 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/nodescalingwatermark_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit nodescalingwatermarks. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: nodescalingwatermark-editor-role 6 | rules: 7 | - apiGroups: 8 | - redhatcop.redhat.io 9 | resources: 10 | - nodescalingwatermarks 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - redhatcop.redhat.io 21 | resources: 22 | - nodescalingwatermarks/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/nodescalingwatermark_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view nodescalingwatermarks. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: nodescalingwatermark-viewer-role 6 | rules: 7 | - apiGroups: 8 | - redhatcop.redhat.io 9 | resources: 10 | - nodescalingwatermarks 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - redhatcop.redhat.io 17 | resources: 18 | - nodescalingwatermarks/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 7 | name: manager-role 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - nodes 13 | - pods 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | - apiGroups: 19 | - apps 20 | resources: 21 | - deployments 22 | verbs: 23 | - create 24 | - delete 25 | - get 26 | - list 27 | - patch 28 | - update 29 | - watch 30 | - apiGroups: 31 | - redhatcop.redhat.io 32 | resources: 33 | - nodescalingwatermarks 34 | verbs: 35 | - create 36 | - delete 37 | - get 38 | - list 39 | - patch 40 | - update 41 | - watch 42 | - apiGroups: 43 | - redhatcop.redhat.io 44 | 
resources: 45 | - nodescalingwatermarks/finalizers 46 | verbs: 47 | - update 48 | - apiGroups: 49 | - redhatcop.redhat.io 50 | resources: 51 | - nodescalingwatermarks/status 52 | verbs: 53 | - get 54 | - patch 55 | - update 56 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/ai-ml-watermark.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redhatcop.redhat.io/v1alpha1 2 | kind: NodeScalingWatermark 3 | metadata: 4 | name: ai-ml 5 | spec: 6 | watermarkPercentage: 20 7 | nodeSelector: 8 | machine.openshift.io/cluster-api-machine-type: ai-ml 9 | tolerations: 10 | - key: "workload" 11 | operator: "Equal" 12 | value: "ai-ml" 13 | effect: "NoSchedule" -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples you want in your CSV to this file as resources ## 2 | resources: 3 | - ai-ml-watermark.yaml 4 | - zone-watermark.yaml 5 | # +kubebuilder:scaffold:manifestskustomizesamples 6 | -------------------------------------------------------------------------------- /config/samples/zone-watermark.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redhatcop.redhat.io/v1alpha1 2 | kind: NodeScalingWatermark 3 | metadata: 4 | name: us-west-2a 5 | spec: 6 | 
watermarkPercentage: 20 7 | nodeSelector: 8 | topology.kubernetes.io/zone: us-west-2a 9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /config/scorecard/bases/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: [] 8 | -------------------------------------------------------------------------------- /config/scorecard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - bases/config.yaml 3 | patchesJson6902: 4 | - path: patches/basic.config.yaml 5 | target: 6 | group: scorecard.operatorframework.io 7 | version: v1alpha3 8 | kind: Configuration 9 | name: config 10 | - path: patches/olm.config.yaml 11 | target: 12 | group: scorecard.operatorframework.io 13 | version: v1alpha3 14 | kind: Configuration 15 | name: config 16 | # +kubebuilder:scaffold:patchesJson6902 17 | -------------------------------------------------------------------------------- /config/scorecard/patches/basic.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - basic-check-spec 7 | image: quay.io/operator-framework/scorecard-test:v1.5.0 8 | labels: 9 | suite: basic 10 | test: basic-check-spec-test 11 | -------------------------------------------------------------------------------- /config/scorecard/patches/olm.config.yaml: -------------------------------------------------------------------------------- 1 | - op: add 2 | path: /stages/0/tests/- 3 | value: 4 | entrypoint: 5 | - scorecard-test 6 | - olm-bundle-validation 7 | image: quay.io/operator-framework/scorecard-test:v1.5.0 8 | labels: 9 | suite: olm 10 | test: olm-bundle-validation-test 11 | - op: add 
12 | path: /stages/0/tests/- 13 | value: 14 | entrypoint: 15 | - scorecard-test 16 | - olm-crds-have-validation 17 | image: quay.io/operator-framework/scorecard-test:v1.5.0 18 | labels: 19 | suite: olm 20 | test: olm-crds-have-validation-test 21 | - op: add 22 | path: /stages/0/tests/- 23 | value: 24 | entrypoint: 25 | - scorecard-test 26 | - olm-crds-have-resources 27 | image: quay.io/operator-framework/scorecard-test:v1.5.0 28 | labels: 29 | suite: olm 30 | test: olm-crds-have-resources-test 31 | - op: add 32 | path: /stages/0/tests/- 33 | value: 34 | entrypoint: 35 | - scorecard-test 36 | - olm-spec-descriptors 37 | image: quay.io/operator-framework/scorecard-test:v1.5.0 38 | labels: 39 | suite: olm 40 | test: olm-spec-descriptors-test 41 | - op: add 42 | path: /stages/0/tests/- 43 | value: 44 | entrypoint: 45 | - scorecard-test 46 | - olm-status-descriptors 47 | image: quay.io/operator-framework/scorecard-test:v1.5.0 48 | labels: 49 | suite: olm 50 | test: olm-status-descriptors-test 51 | -------------------------------------------------------------------------------- /config/templates/watermarkDeploymentTemplate.yaml: -------------------------------------------------------------------------------- 1 | # expected merge structure 2 | # .NodeScalingWatermark 3 | # .Replicas 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: {{ .NodeScalingWatermark.Name }} 8 | namespace: {{ .NodeScalingWatermark.Namespace }} 9 | labels: 10 | watermark: {{ .NodeScalingWatermark.Name }} 11 | spec: 12 | replicas: {{ .Replicas }} 13 | selector: 14 | matchLabels: 15 | watermark: {{ .NodeScalingWatermark.Name }} 16 | template: 17 | metadata: 18 | labels: 19 | watermark: {{ .NodeScalingWatermark.Name }} 20 | proactive-node-scaling-operator.redhat-cop.io/watermark: "" 21 | spec: 22 | enableServiceLinks: false 23 | {{- with .NodeScalingWatermark.Spec.NodeSelector }} 24 | nodeSelector: 25 | {{- toYaml . 
| nindent 8 }} 26 | {{- end }} 27 | {{- with .NodeScalingWatermark.Spec.Tolerations }} 28 | tolerations: 29 | {{- toYaml . | nindent 8 }} 30 | {{- end }} 31 | priorityClassName: {{ .NodeScalingWatermark.Spec.PriorityClassName }} 32 | automountServiceAccountToken: false 33 | containers: 34 | - name: pause 35 | image: {{ .NodeScalingWatermark.Spec.PausePodImage }} 36 | {{- with .NodeScalingWatermark.Spec.PausePodSize }} 37 | resources: 38 | requests: 39 | {{- toYaml . | nindent 12 }} 40 | {{- end }} 41 | 42 | -------------------------------------------------------------------------------- /controllers/nodescalingwatermark_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Red Hat Community of Practice. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "context" 21 | "io/ioutil" 22 | "math" 23 | "os" 24 | "reflect" 25 | "strings" 26 | 27 | "text/template" 28 | 29 | "github.com/go-logr/logr" 30 | "github.com/redhat-cop/operator-utils/pkg/util" 31 | "github.com/redhat-cop/operator-utils/pkg/util/lockedresourcecontroller" 32 | redhatcopv1alpha1 "github.com/redhat-cop/proactive-node-scaling-operator/api/v1alpha1" 33 | corev1 "k8s.io/api/core/v1" 34 | "k8s.io/apimachinery/pkg/api/errors" 35 | "k8s.io/apimachinery/pkg/api/resource" 36 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 37 | "k8s.io/apimachinery/pkg/fields" 38 | "k8s.io/apimachinery/pkg/labels" 39 | "k8s.io/apimachinery/pkg/types" 40 | "k8s.io/client-go/util/workqueue" 41 | ctrl "sigs.k8s.io/controller-runtime" 42 | "sigs.k8s.io/controller-runtime/pkg/builder" 43 | "sigs.k8s.io/controller-runtime/pkg/client" 44 | "sigs.k8s.io/controller-runtime/pkg/event" 45 | "sigs.k8s.io/controller-runtime/pkg/predicate" 46 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 47 | "sigs.k8s.io/controller-runtime/pkg/source" 48 | ) 49 | 50 | const annotationBase = "proactive-node-scaling-operator.redhat-cop.io" 51 | const watermarkLabel = annotationBase + "/watermark" 52 | const templateFileNameEnv = "TEMPLATE_FILE_NAME" 53 | 54 | // NodeScalingWatermarkReconciler reconciles a NodeScalingWatermark object 55 | type NodeScalingWatermarkReconciler struct { 56 | lockedresourcecontroller.EnforcingReconciler 57 | Log logr.Logger 58 | watermarkDeploymentTemplate *template.Template 59 | } 60 | 61 | type templateData struct { 62 | NodeScalingWatermark *redhatcopv1alpha1.NodeScalingWatermark 63 | Replicas int64 64 | } 65 | 66 | // +kubebuilder:rbac:groups=redhatcop.redhat.io,resources=nodescalingwatermarks,verbs=get;list;watch;create;update;patch;delete 67 | // +kubebuilder:rbac:groups=redhatcop.redhat.io,resources=nodescalingwatermarks/status,verbs=get;update;patch 68 | // 
+kubebuilder:rbac:groups=redhatcop.redhat.io,resources=nodescalingwatermarks/finalizers,verbs=update 69 | // +kubebuilder:rbac:groups="",resources=nodes;pods,verbs=get;list;watch 70 | // +kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;list;watch;create;update;patch;delete 71 | 72 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 73 | // move the current state of the cluster closer to the desired state. 74 | // TODO(user): Modify the Reconcile function to compare the state specified by 75 | // the NodeScalingWatermark object against the actual cluster state, and then 76 | // perform operations to make the cluster state reflect the state specified by 77 | // the user. 78 | // 79 | // For more details, check Reconcile and its Result here: 80 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.7.0/pkg/reconcile 81 | func (r *NodeScalingWatermarkReconciler) Reconcile(context context.Context, req ctrl.Request) (ctrl.Result, error) { 82 | log := r.Log.WithValues("nodescalingwatermark", req.NamespacedName) 83 | // Fetch the EgressIPAM instance 84 | instance := &redhatcopv1alpha1.NodeScalingWatermark{} 85 | err := r.GetClient().Get(context, req.NamespacedName, instance) 86 | if err != nil { 87 | if errors.IsNotFound(err) { 88 | // Request object not found, could have been deleted after reconcile request. 89 | // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 90 | // Return and don't requeue 91 | return reconcile.Result{}, nil 92 | } 93 | // Error reading the object - requeue the request. 
94 | return reconcile.Result{}, err 95 | } 96 | 97 | // your logic here 98 | // find all selected nodes 99 | selectedNodes, err := r.getSelectedNodes(context, instance) 100 | if err != nil { 101 | log.Error(err, "unable to load selected nodes", "instance", instance.GetName()) 102 | return r.ManageError(context, instance, err) 103 | } 104 | 105 | log.Info("selected nodes", "count", len(selectedNodes)) 106 | 107 | // find all selected pods 108 | selectedPods, err := r.getSelectedPods(context, selectedNodes) 109 | if err != nil { 110 | log.Error(err, "unable to load selected pods", "instance", instance.GetName()) 111 | return r.ManageError(context, instance, err) 112 | } 113 | 114 | log.Info("selected pods", "count", len(selectedPods)) 115 | 116 | // sum pods requests 117 | totalRequests := r.sumRequests(selectedPods) 118 | 119 | log.Info("selected pods total", "request", totalRequests) 120 | 121 | // calculate ratio with passed pod size and obtain number replicas 122 | replicas := r.getNeededReplicas(instance, totalRequests) 123 | 124 | log.Info("watermark", "replicas", replicas) 125 | 126 | // process and apply template 127 | 128 | templateData := templateData{ 129 | NodeScalingWatermark: instance, 130 | Replicas: replicas, 131 | } 132 | 133 | obj, err := util.ProcessTemplate(templateData, r.watermarkDeploymentTemplate) 134 | 135 | //objs, err := r.processTemplate(instance, data) 136 | if err != nil { 137 | log.Error(err, "unable process watermark pod deployment template from", "instance", instance, "and from services", templateData) 138 | return r.ManageError(context, instance, err) 139 | } 140 | 141 | // err = r.UpdateLockedResources(context, instance, []lockedresource.LockedResource{ 142 | // { 143 | // Unstructured: *obj, 144 | // ExcludedPaths: []string{ 145 | // ".metadata", 146 | // ".status", 147 | // }, 148 | // }, 149 | // }, []lockedpatch.LockedPatch{}) 150 | // if err != nil { 151 | // log.Error(err, "unable to update locked resources") 152 | // return 
r.ManageError(context, instance, err) 153 | // } 154 | 155 | err = r.CreateOrUpdateResource(context, instance, instance.GetNamespace(), obj) 156 | if err != nil { 157 | log.Error(err, "unable to create or update resource", "resource", obj) 158 | return r.ManageError(context, instance, err) 159 | } 160 | 161 | return r.ManageSuccess(context, instance) 162 | } 163 | 164 | func (r *NodeScalingWatermarkReconciler) getNeededReplicas(instance *redhatcopv1alpha1.NodeScalingWatermark, totalRequests corev1.ResourceList) int64 { 165 | replicas := float64(0) 166 | for measure, podQuantity := range instance.Spec.PausePodSize { 167 | totalQuantity, ok := totalRequests[measure] 168 | if !ok { 169 | continue 170 | } 171 | neededReplicas := totalQuantity.AsApproximateFloat64() * float64(instance.Spec.WatermarkPercentage) / 100 / podQuantity.AsApproximateFloat64() 172 | replicas = math.Max(replicas, neededReplicas) 173 | } 174 | return int64(replicas) 175 | } 176 | 177 | func (r *NodeScalingWatermarkReconciler) sumRequests(pods []corev1.Pod) corev1.ResourceList { 178 | totalRequests := corev1.ResourceList{} 179 | for i := range pods { 180 | totalRequests = sumResources(totalRequests, getTotalRequests(&pods[i])) 181 | } 182 | return totalRequests 183 | } 184 | 185 | func (r *NodeScalingWatermarkReconciler) getSelectedPods(context context.Context, nodes []corev1.Node) ([]corev1.Pod, error) { 186 | selectedPods := []corev1.Pod{} 187 | for _, node := range nodes { 188 | podList := &corev1.PodList{} 189 | err := r.GetClient().List(context, podList, &client.ListOptions{ 190 | FieldSelector: fields.OneTermEqualSelector("spec.nodeName", node.Name), 191 | }) 192 | if err != nil { 193 | r.Log.Error(err, "unable to list pod by field", "spec.nodeName", node.Name) 194 | return []corev1.Pod{}, err 195 | } 196 | selectedPods = append(selectedPods, r.filterWatermarkAndSystemPods(podList.Items)...) 
197 | } 198 | return selectedPods, nil 199 | } 200 | 201 | func (r *NodeScalingWatermarkReconciler) filterWatermarkAndSystemPods(pods []corev1.Pod) []corev1.Pod { 202 | filteredPods := []corev1.Pod{} 203 | for i := range pods { 204 | _, ok := pods[i].Labels[watermarkLabel] 205 | if strings.HasPrefix(pods[i].Namespace, "kube-") || strings.HasPrefix(pods[i].Namespace, "openshift-") || pods[i].Namespace == "default" || ok || util.IsBeingDeleted(&pods[i]) { 206 | continue 207 | } 208 | filteredPods = append(filteredPods, pods[i]) 209 | } 210 | return filteredPods 211 | } 212 | 213 | func (r *NodeScalingWatermarkReconciler) getSelectedNodes(context context.Context, instance *redhatcopv1alpha1.NodeScalingWatermark) ([]corev1.Node, error) { 214 | nodeList := &corev1.NodeList{} 215 | err := r.GetClient().List(context, nodeList, &client.ListOptions{ 216 | LabelSelector: labels.SelectorFromSet(labels.Set(instance.Spec.NodeSelector)), 217 | }) 218 | if err != nil { 219 | r.Log.Error(err, "unable to find nodes mathcing", "labels", instance.Spec.NodeSelector) 220 | return []corev1.Node{}, err 221 | } 222 | return nodeList.Items, nil 223 | } 224 | 225 | func sumQuantity(left, right resource.Quantity) resource.Quantity { 226 | result := resource.Quantity{} 227 | result.Add(left) 228 | result.Add(right) 229 | return result 230 | } 231 | 232 | func sumResources(left corev1.ResourceList, right corev1.ResourceList) corev1.ResourceList { 233 | result := left.DeepCopy() 234 | for measure, value := range right { 235 | if currentValue, ok := result[measure]; ok { 236 | result[measure] = sumQuantity(currentValue, value) 237 | } else { 238 | result[measure] = value 239 | } 240 | } 241 | return result 242 | } 243 | 244 | func getTotalRequests(pod *corev1.Pod) corev1.ResourceList { 245 | result := corev1.ResourceList{} 246 | for i := range pod.Spec.Containers { 247 | result = sumResources(result, pod.Spec.Containers[i].Resources.Requests) 248 | } 249 | return result 250 | } 251 | 252 | func 
(r *NodeScalingWatermarkReconciler) initializeTemplate() (*template.Template, error) { 253 | templateFileName, ok := os.LookupEnv(templateFileNameEnv) 254 | if !ok { 255 | templateFileName = "/templates/watermarkDeploymentTemplate.yaml" 256 | } 257 | text, err := ioutil.ReadFile(templateFileName) 258 | if err != nil { 259 | r.Log.Error(err, "Error reading job template file", "filename", templateFileName) 260 | return &template.Template{}, err 261 | } 262 | watermarkDeploymentTemplate, err := template.New("WatermarkDeployment").Funcs(util.AdvancedTemplateFuncMap(r.GetRestConfig())).Parse(string(text)) 263 | if err != nil { 264 | r.Log.Error(err, "Error parsing template", "template", string(text)) 265 | return &template.Template{}, err 266 | } 267 | return watermarkDeploymentTemplate, nil 268 | } 269 | 270 | // SetupWithManager sets up the controller with the Manager. 271 | func (r *NodeScalingWatermarkReconciler) SetupWithManager(mgr ctrl.Manager) error { 272 | 273 | watermarkDeploymentTemplate, err := r.initializeTemplate() 274 | if err != nil { 275 | r.Log.Error(err, "unable to initialize watermarkDeploymentTemplate") 276 | return err 277 | } 278 | r.watermarkDeploymentTemplate = watermarkDeploymentTemplate 279 | 280 | log2 := r.Log.WithName("IsCreatedOrIsEgressIPsChanged") 281 | 282 | IsRequestChanged := predicate.Funcs{ 283 | UpdateFunc: func(e event.UpdateEvent) bool { 284 | oldPod, ok := e.ObjectOld.(*corev1.Pod) 285 | if !ok { 286 | log2.Info("unable to convert event object to Pod,", "event", e) 287 | return false 288 | } 289 | newPod, ok := e.ObjectNew.(*corev1.Pod) 290 | if !ok { 291 | log2.Info("unable to convert event object to Pod,", "event", e) 292 | return false 293 | } 294 | oldRequest := getTotalRequests(oldPod) 295 | newRequest := getTotalRequests(newPod) 296 | return !reflect.DeepEqual(oldRequest, newRequest) || (len(newRequest) > 0 && (oldPod.Spec.NodeName != newPod.Spec.NodeName || oldPod.ObjectMeta.DeletionTimestamp != 
newPod.ObjectMeta.DeletionTimestamp)) 297 | }, 298 | CreateFunc: func(e event.CreateEvent) bool { 299 | pod, ok := e.Object.(*corev1.Pod) 300 | return ok && pod.Spec.NodeName != "" && len(getTotalRequests(pod)) > 0 301 | }, 302 | DeleteFunc: func(e event.DeleteEvent) bool { 303 | pod, ok := e.Object.(*corev1.Pod) 304 | return ok && len(getTotalRequests(pod)) > 0 305 | }, 306 | GenericFunc: func(e event.GenericEvent) bool { 307 | return false 308 | }, 309 | } 310 | 311 | err = mgr.GetFieldIndexer().IndexField(context.TODO(), &corev1.Pod{}, "spec.nodeName", func(rawObj client.Object) []string { 312 | pod := rawObj.(*corev1.Pod) 313 | return []string{pod.Spec.NodeName} 314 | }) 315 | 316 | if err != nil { 317 | r.Log.Error(err, "unable to create index on `spec.nodeName`") 318 | return err 319 | } 320 | 321 | return ctrl.NewControllerManagedBy(mgr). 322 | For(&redhatcopv1alpha1.NodeScalingWatermark{}, builder.WithPredicates(util.ResourceGenerationOrFinalizerChangedPredicate{})). 323 | Watches(&source.Kind{Type: &corev1.Pod{ 324 | TypeMeta: metav1.TypeMeta{ 325 | Kind: "Pod", 326 | }, 327 | }}, &enqueForScalingWatermarksSelectingNodeHostingPod{ 328 | r: r, 329 | log: r.Log.WithName("enqueForScalingWatermarksSelectingNodeHostingPod"), 330 | }, builder.WithPredicates(&IsRequestChanged)). 
331 | Complete(r) 332 | } 333 | 334 | func (e *enqueForScalingWatermarksSelectingNodeHostingPod) getNode(name string) (*corev1.Node, bool, error) { 335 | node := &corev1.Node{} 336 | err := e.r.GetClient().Get(context.TODO(), client.ObjectKey{ 337 | Name: name, 338 | }, node) 339 | if err != nil { 340 | if errors.IsNotFound(err) { 341 | return &corev1.Node{}, false, nil 342 | } 343 | e.log.Error(err, "unable to look up", "node", name) 344 | return &corev1.Node{}, false, err 345 | } 346 | return node, true, nil 347 | } 348 | 349 | func (e *enqueForScalingWatermarksSelectingNodeHostingPod) getAllNodeScalingWatermark() ([]redhatcopv1alpha1.NodeScalingWatermark, error) { 350 | nodeScalingWaetermarkList := &redhatcopv1alpha1.NodeScalingWatermarkList{} 351 | err := e.r.GetClient().List(context.TODO(), nodeScalingWaetermarkList) 352 | if err != nil { 353 | e.log.Error(err, "unable to list NodeScalingWatermark") 354 | return []redhatcopv1alpha1.NodeScalingWatermark{}, nil 355 | } 356 | return nodeScalingWaetermarkList.Items, nil 357 | } 358 | 359 | type enqueForScalingWatermarksSelectingNodeHostingPod struct { 360 | r *NodeScalingWatermarkReconciler 361 | log logr.Logger 362 | } 363 | 364 | // trigger a egressIPAM reconcile event for those egressIPAM objects that reference this hostsubnet indireclty via the corresponding node. 
365 | func (e *enqueForScalingWatermarksSelectingNodeHostingPod) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) { 366 | pod, ok := evt.Object.(*corev1.Pod) 367 | if !ok { 368 | e.log.Info("unable convert event object to pod,", "event", evt) 369 | return 370 | } 371 | e.processReferringNodeScalingWatemarks(pod, q) 372 | } 373 | 374 | func (e *enqueForScalingWatermarksSelectingNodeHostingPod) processReferringNodeScalingWatemarks(pod *corev1.Pod, q workqueue.RateLimitingInterface) { 375 | 376 | nodeName := pod.Spec.NodeName 377 | if nodeName == "" { 378 | return 379 | } 380 | node, found, err := e.getNode(nodeName) 381 | if err != nil { 382 | e.log.Error(err, "unable to lookup", "node", nodeName) 383 | return 384 | } 385 | if !found { 386 | return 387 | } 388 | 389 | nodeScalingWatermarks, err := e.getAllNodeScalingWatermark() 390 | 391 | if err != nil { 392 | e.log.Error(err, "unable to list NodeScalingWatermarks") 393 | return 394 | } 395 | 396 | for _, nodeScalingWatermark := range nodeScalingWatermarks { 397 | labelSelector := labels.SelectorFromSet(nodeScalingWatermark.Spec.NodeSelector) 398 | if labelSelector.Matches(labels.Set(node.Labels)) { 399 | q.Add(reconcile.Request{NamespacedName: types.NamespacedName{ 400 | Namespace: nodeScalingWatermark.GetNamespace(), 401 | Name: nodeScalingWatermark.GetName(), 402 | }}) 403 | } 404 | } 405 | } 406 | 407 | // Update implements EventHandler 408 | // trigger a router reconcile event for those routes that reference this secret 409 | func (e *enqueForScalingWatermarksSelectingNodeHostingPod) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) { 410 | pod, ok := evt.ObjectNew.(*corev1.Pod) 411 | if !ok { 412 | e.log.Info("unable convert event object to pod,", "event", evt) 413 | return 414 | } 415 | e.processReferringNodeScalingWatemarks(pod, q) 416 | } 417 | 418 | // Delete implements EventHandler 419 | func (e *enqueForScalingWatermarksSelectingNodeHostingPod) Delete(evt event.DeleteEvent, 
q workqueue.RateLimitingInterface) { 420 | pod, ok := evt.Object.(*corev1.Pod) 421 | if !ok { 422 | e.log.Info("unable convert event object to pod,", "event", evt) 423 | return 424 | } 425 | e.processReferringNodeScalingWatemarks(pod, q) 426 | } 427 | 428 | // Generic implements EventHandler 429 | func (e *enqueForScalingWatermarksSelectingNodeHostingPod) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) { 430 | return 431 | } 432 | -------------------------------------------------------------------------------- /controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Red Hat Community of Practice. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "path/filepath" 21 | "testing" 22 | 23 | . "github.com/onsi/ginkgo" 24 | . "github.com/onsi/gomega" 25 | "k8s.io/client-go/kubernetes/scheme" 26 | "k8s.io/client-go/rest" 27 | "sigs.k8s.io/controller-runtime/pkg/client" 28 | "sigs.k8s.io/controller-runtime/pkg/envtest" 29 | "sigs.k8s.io/controller-runtime/pkg/envtest/printer" 30 | logf "sigs.k8s.io/controller-runtime/pkg/log" 31 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 32 | 33 | redhatcopv1alpha1 "github.com/redhat-cop/proactive-node-scaling-operator/api/v1alpha1" 34 | // +kubebuilder:scaffold:imports 35 | ) 36 | 37 | // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to 38 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 39 | 40 | var cfg *rest.Config 41 | var k8sClient client.Client 42 | var testEnv *envtest.Environment 43 | 44 | func TestAPIs(t *testing.T) { 45 | RegisterFailHandler(Fail) 46 | 47 | RunSpecsWithDefaultAndCustomReporters(t, 48 | "Controller Suite", 49 | []Reporter{printer.NewlineReporter{}}) 50 | } 51 | 52 | var _ = BeforeSuite(func() { 53 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 54 | 55 | By("bootstrapping test environment") 56 | testEnv = &envtest.Environment{ 57 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, 58 | } 59 | 60 | cfg, err := testEnv.Start() 61 | Expect(err).NotTo(HaveOccurred()) 62 | Expect(cfg).NotTo(BeNil()) 63 | 64 | err = redhatcopv1alpha1.AddToScheme(scheme.Scheme) 65 | Expect(err).NotTo(HaveOccurred()) 66 | 67 | // +kubebuilder:scaffold:scheme 68 | 69 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 70 | Expect(err).NotTo(HaveOccurred()) 71 | Expect(k8sClient).NotTo(BeNil()) 72 | 73 | }, 60) 74 | 75 | var _ = AfterSuite(func() { 76 | By("tearing down the test environment") 77 | err := testEnv.Stop() 78 | Expect(err).NotTo(HaveOccurred()) 79 | }) 80 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/redhat-cop/proactive-node-scaling-operator 2 | 3 | go 1.16 4 | 5 | require ( 6 | github.com/go-logr/logr v0.4.0 7 | github.com/onsi/ginkgo v1.16.4 8 | github.com/onsi/gomega v1.13.0 9 | github.com/redhat-cop/operator-utils v1.1.4 10 | k8s.io/api v0.20.2 11 | k8s.io/apimachinery v0.20.2 12 | k8s.io/client-go v0.20.2 13 | sigs.k8s.io/controller-runtime v0.8.3 14 | ) 15 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: 
-------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Red Hat Community of Practice. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021 Red Hat Community of Practice. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "flag" 21 | "os" 22 | 23 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 24 | // to ensure that exec-entrypoint and run can make use of them. 
25 | 26 | "k8s.io/apimachinery/pkg/runtime" 27 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 28 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 29 | _ "k8s.io/client-go/plugin/pkg/client/auth" 30 | ctrl "sigs.k8s.io/controller-runtime" 31 | "sigs.k8s.io/controller-runtime/pkg/healthz" 32 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 33 | 34 | "github.com/redhat-cop/operator-utils/pkg/util/lockedresourcecontroller" 35 | redhatcopv1alpha1 "github.com/redhat-cop/proactive-node-scaling-operator/api/v1alpha1" 36 | "github.com/redhat-cop/proactive-node-scaling-operator/controllers" 37 | // +kubebuilder:scaffold:imports 38 | ) 39 | 40 | var ( 41 | scheme = runtime.NewScheme() 42 | setupLog = ctrl.Log.WithName("setup") 43 | ) 44 | 45 | func init() { 46 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 47 | 48 | utilruntime.Must(redhatcopv1alpha1.AddToScheme(scheme)) 49 | // +kubebuilder:scaffold:scheme 50 | } 51 | 52 | func main() { 53 | var metricsAddr string 54 | var enableLeaderElection bool 55 | var probeAddr string 56 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 57 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 58 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 59 | "Enable leader election for controller manager. 
"+ 60 | "Enabling this will ensure there is only one active controller manager.") 61 | opts := zap.Options{ 62 | Development: true, 63 | } 64 | opts.BindFlags(flag.CommandLine) 65 | flag.Parse() 66 | 67 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 68 | 69 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 70 | Scheme: scheme, 71 | MetricsBindAddress: metricsAddr, 72 | Port: 9443, 73 | HealthProbeBindAddress: probeAddr, 74 | LeaderElection: enableLeaderElection, 75 | LeaderElectionID: "af18d8c9.redhat.io", 76 | LeaderElectionResourceLock: "configmaps", 77 | }) 78 | if err != nil { 79 | setupLog.Error(err, "unable to start manager") 80 | os.Exit(1) 81 | } 82 | 83 | if err = (&controllers.NodeScalingWatermarkReconciler{ 84 | EnforcingReconciler: lockedresourcecontroller.NewFromManager(mgr, mgr.GetEventRecorderFor("NodeScalingWatermark_controller"), true), 85 | Log: ctrl.Log.WithName("controllers").WithName("NodeScalingWatermark"), 86 | }).SetupWithManager(mgr); err != nil { 87 | setupLog.Error(err, "unable to create controller", "controller", "NodeScalingWatermark") 88 | os.Exit(1) 89 | } 90 | // +kubebuilder:scaffold:builder 91 | 92 | if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil { 93 | setupLog.Error(err, "unable to set up health check") 94 | os.Exit(1) 95 | } 96 | if err := mgr.AddReadyzCheck("check", healthz.Ping); err != nil { 97 | setupLog.Error(err, "unable to set up ready check") 98 | os.Exit(1) 99 | } 100 | 101 | setupLog.Info("starting manager") 102 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 103 | setupLog.Error(err, "problem running manager") 104 | os.Exit(1) 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /media/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cop/proactive-node-scaling-operator/95a2125f6ec63a62e452d99f2679305c69a30360/media/icon.png 
-------------------------------------------------------------------------------- /media/proactive-node-scaling-operator.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-cop/proactive-node-scaling-operator/95a2125f6ec63a62e452d99f2679305c69a30360/media/proactive-node-scaling-operator.png -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # Proactive Node Scaling Operator 2 | 3 | ![build status](https://github.com/redhat-cop/proactive-node-scaling-operator/workflows/push/badge.svg) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/redhat-cop/proactive-node-scaling-operator)](https://goreportcard.com/report/github.com/redhat-cop/proactive-node-scaling-operator) 5 | ![GitHub go.mod Go version](https://img.shields.io/github/go-mod/go-version/redhat-cop/proactive-node-scaling-operator) 6 | 7 | This operator makes the [cluster autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) more proactive. As of now the cluster autoscaler will create new nodes only when a pod is pending because it cannot be allocated due to lack of capacity. This is not a good user experience as the pending workload has to wait for several minutes as the new node is created and joins the cluster. 8 | 9 | The Proactive Node Scaling Operator improves the user experience by allocating low priority pods that don't do anything. When the cluster is full and a new user pod is created the following happens: 10 | 11 | 1. some of the low priority pods are de-scheduled to make room for the user pod, which can then be scheduled. The user workload does not have to wait in this case. 12 | 13 | 2. the de-scheduled low priority pods are rescheduled and in doing so, trigger the cluster autoscaler to add new nodes.
14 | 15 | Essentially this operator allows you to trade wasted resources for faster response time. 16 | 17 | In order for this operator to work correctly [pod priorities](https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/) must be defined. The default name for the priority class used by this operator is "proactive-node-autoscaling-pods" and it should have the lowest possible priority, 0. To ensure your regular workloads get a normal priority you should also define a PriorityClass for those and set globalDefault to true. 18 | 19 | For example: 20 | 21 | ```yaml 22 | apiVersion: scheduling.k8s.io/v1 23 | kind: PriorityClass 24 | metadata: 25 | name: proactive-node-autoscaling-pods 26 | value: 0 27 | globalDefault: false 28 | description: "This priority class is the Priority class for Proactive Node Scaling." 29 | --- 30 | apiVersion: scheduling.k8s.io/v1 31 | kind: PriorityClass 32 | metadata: 33 | name: normal-workload 34 | value: 1000 35 | globalDefault: true 36 | description: "This priority class is the cluster default and should be used for normal workloads." 37 | ``` 38 | 39 | Also for this operator to work the [cluster autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) must be active, see OpenShift instructions [here](https://docs.openshift.com/container-platform/4.6/machine_management/applying-autoscaling.html) on how to turn it on. 40 | 41 | To activate the proactive autoscaling, a CR must be defined, here is an example: 42 | 43 | ```yaml 44 | apiVersion: redhatcop.redhat.io/v1alpha1 45 | kind: NodeScalingWatermark 46 | metadata: 47 | name: us-west-2a 48 | spec: 49 | priorityClassName: proactive-node-autoscaling-pods 50 | watermarkPercentage: 20 51 | nodeSelector: 52 | topology.kubernetes.io/zone: us-west-2a 53 | ``` 54 | 55 | The `nodeSelector` selects the nodes observed by this operator, which are also the nodes on which the low priority pods will be scheduled.
The nodes observed by the cluster autoscaler should coincide with the nodes selected by this operator CR. 56 | 57 | The `watermarkPercentage` defines the percentage of capacity of user workload that will be allocated to low priority pods. So in this example 20% of the user allocated capacity will be allocated via low priority pods. This also means that when the user workload reaches 80% capacity of the nodes selected by this CR (and the autoscaler), the cluster will start to scale. 58 | 59 | ## Deploying the Operator 60 | 61 | This is a cluster-level operator that you can deploy in any namespace, `proactive-node-scaling-operator` is recommended. 62 | 63 | It is recommended to deploy this operator via [`OperatorHub`](https://operatorhub.io/), but you can also deploy it using [`Helm`](https://helm.sh/). 64 | 65 | ### Deploying from OperatorHub 66 | 67 | > **Note**: This operator supports being installed in disconnected environments 68 | 69 | If you want to utilize the Operator Lifecycle Manager (OLM) to install this operator, you can do so in two ways: from the UI or the CLI. 70 | 71 | #### Deploying from OperatorHub UI 72 | 73 | * If you would like to launch this operator from the UI, you'll need to navigate to the OperatorHub tab in the console. Before starting, make sure you've created the namespace that you want to install this operator in by running the following: 74 | 75 | ```shell 76 | oc new-project proactive-node-scaling-operator 77 | ``` 78 | 79 | * Once there, you can search for this operator by name: `proactive node scaling operator`. This will then return an item for our operator and you can select it to get started. Once you've arrived here, you'll be presented with an option to install, which will begin the process. 80 | * After clicking the install button, you can then select the namespace that you would like to install this to as well as the installation strategy you would like to proceed with (`Automatic` or `Manual`).
81 | * Once you've made your selection, you can select `Subscribe` and the installation will begin. After a few moments you can go ahead and check your namespace and you should see the operator running. 82 | 83 | ![Proactive Node Scaling Operator](./media/proactive-node-scaling-operator.png) 84 | 85 | #### Deploying from OperatorHub using CLI 86 | 87 | If you'd like to launch this operator from the command line, you can use the manifests contained in this repository by running the following: 88 | 89 | `oc new-project proactive-node-scaling-operator` 90 | 91 | ```shell 92 | oc apply -f config/operatorhub -n proactive-node-scaling-operator 93 | ``` 94 | 95 | This will create the appropriate OperatorGroup and Subscription and will trigger OLM to launch the operator in the specified namespace. 96 | 97 | ### Deploying with Helm 98 | 99 | Here are the instructions to install the latest release with Helm. 100 | 101 | ```shell 102 | oc new-project proactive-node-scaling-operator 103 | helm repo add proactive-node-scaling-operator https://redhat-cop.github.io/proactive-node-scaling-operator 104 | helm repo update 105 | helm install proactive-node-scaling-operator proactive-node-scaling-operator/proactive-node-scaling-operator 106 | ``` 107 | 108 | This can later be updated with the following commands: 109 | 110 | ```shell 111 | helm repo update 112 | helm upgrade proactive-node-scaling-operator proactive-node-scaling-operator/proactive-node-scaling-operator 113 | ``` 114 | 115 | ### Disconnected deployment 116 | 117 | Use the `PausePodImage` field of the `NodeScalingWatermark` to specify an internally mirrored pause pod image, when running in a disconnected environment. 118 | 119 | ## Metrics 120 | 121 | Prometheus compatible metrics are exposed by the Operator and can be integrated into OpenShift's default cluster monitoring. To enable OpenShift cluster monitoring, label the namespace the operator is deployed in with the label `openshift.io/cluster-monitoring="true"`.
122 | 123 | ```shell 124 | oc label namespace <operator-namespace> openshift.io/cluster-monitoring="true" 125 | ``` 126 | 127 | ### Testing metrics 128 | 129 | ```sh 130 | export operatorNamespace=proactive-node-scaling-operator-local # or proactive-node-scaling-operator 131 | oc label namespace ${operatorNamespace} openshift.io/cluster-monitoring="true" 132 | oc rsh -n openshift-monitoring -c prometheus prometheus-k8s-0 /bin/bash 133 | export operatorNamespace=proactive-node-scaling-operator-local # or proactive-node-scaling-operator 134 | curl -v -s -k -H "Authorization: Bearer $(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" https://proactive-node-scaling-operator-controller-manager-metrics.${operatorNamespace}.svc.cluster.local:8443/metrics 135 | exit 136 | ``` 137 | 138 | ## Development 139 | 140 | ### Running the operator locally 141 | 142 | ```shell 143 | make install 144 | export TEMPLATE_FILE_NAME=./config/templates/watermarkDeploymentTemplate.yaml 145 | oc new-project proactive-node-scaling-operator-local 146 | kustomize build ./config/local-development | oc apply -f - -n proactive-node-scaling-operator-local 147 | export token=$(oc serviceaccounts get-token 'proactive-node-scaling-controller-manager' -n proactive-node-scaling-operator-local) 148 | oc login --token ${token} 149 | make run ENABLE_WEBHOOKS=false 150 | ``` 151 | 152 | ### Test helm chart locally 153 | 154 | Define an image and tag. For example... 155 | 156 | ```shell 157 | export imageRepository="quay.io/redhat-cop/proactive-node-scaling-operator" 158 | export imageTag="$(git describe --tags --abbrev=0)" # grabs the most recent git tag, which should match the image tag 159 | ``` 160 | 161 | Deploy chart...
162 | 163 | ```shell 164 | make helmchart 165 | helm upgrade -i proactive-node-scaling-operator-helmchart-test charts/proactive-node-scaling-operator -n proactive-node-scaling-operator-local --set image.repository=${imageRepository} --set image.tag=${imageTag} --create-namespace 166 | ``` 167 | 168 | Delete... 169 | 170 | ```shell 171 | helm delete proactive-node-scaling-operator-helmchart-test -n proactive-node-scaling-operator-local 172 | kubectl delete -f charts/proactive-node-scaling-operator/crds/crds.yaml 173 | ``` 174 | 175 | ### Building/Pushing the operator image 176 | 177 | ```shell 178 | export repo=raffaelespazzoli #replace with yours 179 | docker login quay.io/$repo/proactive-node-scaling-operator 180 | make docker-build IMG=quay.io/$repo/proactive-node-scaling-operator:latest 181 | make docker-push IMG=quay.io/$repo/proactive-node-scaling-operator:latest 182 | ``` 183 | 184 | ### Deploy to OLM via bundle 185 | 186 | ```shell 187 | make manifests 188 | make bundle IMG=quay.io/$repo/proactive-node-scaling-operator:latest 189 | operator-sdk bundle validate ./bundle --select-optional name=operatorhub 190 | make bundle-build BUNDLE_IMG=quay.io/$repo/proactive-node-scaling-operator-bundle:latest 191 | docker login quay.io/$repo/proactive-node-scaling-operator-bundle 192 | docker push quay.io/$repo/proactive-node-scaling-operator-bundle:latest 193 | operator-sdk bundle validate quay.io/$repo/proactive-node-scaling-operator-bundle:latest --select-optional name=operatorhub 194 | oc new-project proactive-node-scaling-operator 195 | oc label namespace proactive-node-scaling-operator openshift.io/cluster-monitoring="true" 196 | operator-sdk cleanup proactive-node-scaling-operator -n proactive-node-scaling-operator 197 | operator-sdk run bundle --install-mode AllNamespaces -n proactive-node-scaling-operator quay.io/$repo/proactive-node-scaling-operator-bundle:latest 198 | ``` 199 | 200 | ### Testing 201 | 202 | Create the following resource: 203 | 204 | ```shell 
205 | oc new-project proactive-node-scaling-operator-test 206 | oc apply -f ./test/ai-ml-watermark.yaml -n proactive-node-scaling-operator-test 207 | oc apply -f ./test/zone-watermark.yaml -n proactive-node-scaling-operator-test 208 | ``` 209 | 210 | ### Releasing 211 | 212 | ```shell 213 | git tag -a "<version>" -m "<commit message>" 214 | git push upstream <version> 215 | ``` 216 | 217 | If you need to remove a release: 218 | 219 | ```shell 220 | git tag -d <version> 221 | git push upstream --delete <version> 222 | ``` 223 | 224 | If you need to "move" a release to the current main 225 | 226 | ```shell 227 | git tag -f <version> 228 | git push upstream -f <version> 229 | ``` 230 | 231 | ### Cleaning up 232 | 233 | ```shell 234 | operator-sdk cleanup proactive-node-scaling-operator -n proactive-node-scaling-operator 235 | oc delete operatorgroup operator-sdk-og 236 | oc delete catalogsource proactive-node-scaling-operator-catalog 237 | ``` 238 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | "config:best-practices", 5 | "schedule:earlyMondays" 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /test/ai-ml-watermark.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redhatcop.redhat.io/v1alpha1 2 | kind: NodeScalingWatermark 3 | metadata: 4 | name: ai-ml 5 | spec: 6 | watermarkPercentage: 20 7 | nodeSelector: 8 | machine.openshift.io/cluster-api-machine-type: ai-ml 9 | tolerations: 10 | - key: "workload" 11 | operator: "Equal" 12 | value: "ai-ml" 13 | effect: "NoSchedule" -------------------------------------------------------------------------------- /test/zone-watermark.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redhatcop.redhat.io/v1alpha1 2 | kind:
NodeScalingWatermark 3 | metadata: 4 | name: us-west-2a 5 | spec: 6 | watermarkPercentage: 20 7 | nodeSelector: 8 | topology.kubernetes.io/zone: us-west-2a 9 | 10 | 11 | 12 | --------------------------------------------------------------------------------