├── .dockerignore
├── .github
└── workflows
│ └── releases.yaml
├── .gitignore
├── .golangci.yml
├── .idea
├── .gitignore
├── golinter.xml
├── modules.xml
├── typesense-operator.iml
└── vcs.xml
├── .mirrord
└── mirrord.json
├── Dockerfile
├── LICENSE
├── Makefile
├── PROJECT
├── README.md
├── api
└── v1alpha1
│ ├── groupversion_info.go
│ ├── typesensecluster_types.go
│ ├── typesensecluster_types_helpers.go
│ └── zz_generated.deepcopy.go
├── charts
└── typesense-operator
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── templates
│ ├── _helpers.tpl
│ ├── deployment.yaml
│ ├── leader-election-rbac.yaml
│ ├── manager-rbac.yaml
│ ├── metrics-auth-rbac.yaml
│ ├── metrics-reader-rbac.yaml
│ ├── metrics-service.yaml
│ ├── serviceaccount.yaml
│ ├── typesensecluster-crd.yaml
│ ├── typesensecluster-editor-rbac.yaml
│ └── typesensecluster-viewer-rbac.yaml
│ └── values.yaml
├── cmd
└── main.go
├── config
├── crd
│ ├── bases
│ │ └── ts.opentelekomcloud.com_typesenseclusters.yaml
│ ├── kustomization.yaml
│ └── kustomizeconfig.yaml
├── default
│ ├── kustomization.yaml
│ ├── manager_metrics_patch.yaml
│ └── metrics_service.yaml
├── manager
│ ├── kustomization.yaml
│ └── manager.yaml
├── manifests
│ └── kustomization.yaml
├── prometheus
│ ├── kustomization.yaml
│ └── monitor.yaml
├── rbac
│ ├── kustomization.yaml
│ ├── leader_election_role.yaml
│ ├── leader_election_role_binding.yaml
│ ├── metrics_auth_role.yaml
│ ├── metrics_auth_role_binding.yaml
│ ├── metrics_reader_role.yaml
│ ├── role.yaml
│ ├── role_binding.yaml
│ ├── service_account.yaml
│ ├── typesensecluster_editor_role.yaml
│ └── typesensecluster_viewer_role.yaml
├── samples
│ ├── kustomization.yaml
│ ├── ts_v1alpha1_typesensecluster.yaml
│ ├── ts_v1alpha1_typesensecluster_aws.yaml
│ ├── ts_v1alpha1_typesensecluster_azure.yaml
│ ├── ts_v1alpha1_typesensecluster_bm.yaml
│ ├── ts_v1alpha1_typesensecluster_kind.yaml
│ └── ts_v1alpha1_typesensecluster_opentelekomcloud.yaml
└── scorecard
│ ├── bases
│ └── config.yaml
│ ├── kustomization.yaml
│ └── patches
│ ├── basic.config.yaml
│ └── olm.config.yaml
├── go.mod
├── go.sum
├── hack
└── boilerplate.go.txt
├── internal
└── controller
│ ├── suite_test.go
│ ├── typesensecluster_condition_types.go
│ ├── typesensecluster_configmap.go
│ ├── typesensecluster_constants.go
│ ├── typesensecluster_controller.go
│ ├── typesensecluster_controller_test.go
│ ├── typesensecluster_helpers.go
│ ├── typesensecluster_ingress.go
│ ├── typesensecluster_podmonitor.go
│ ├── typesensecluster_quorum.go
│ ├── typesensecluster_quorum_helpers.go
│ ├── typesensecluster_quorum_types.go
│ ├── typesensecluster_scraper.go
│ ├── typesensecluster_secret.go
│ ├── typesensecluster_services.go
│ ├── typesensecluster_statefulset.go
│ └── utils.go
└── test
├── e2e
├── e2e_suite_test.go
└── e2e_test.go
└── utils
└── utils.go
/.dockerignore:
--------------------------------------------------------------------------------
1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file
2 | # Ignore build and test binaries.
3 | bin/
4 |
--------------------------------------------------------------------------------
/.github/workflows/releases.yaml:
--------------------------------------------------------------------------------
1 | name: Release Charts
2 | on:
3 | # push:
4 | # branches:
5 | # - main
6 | # tags:
7 | # - "v*.*.*"
8 | workflow_dispatch:
9 | # NOTE(review): `paths-ignore` is not a valid filter for the `workflow_dispatch`
10 | # trigger — GitHub Actions only honors path filters on push/pull_request.
11 | # Re-attach these lines under `push:` if that trigger is re-enabled.
12 | # paths-ignore:
13 | # - '**/README.md'
14 | # - "**/.github/workflows/**"
12 |
13 | jobs:
14 | chart-release:
15 | permissions:
16 | contents: write
17 | runs-on: ubuntu-latest
18 | steps:
19 | - name: Checkout
20 | uses: actions/checkout@v4
21 | with:
22 | fetch-depth: 0
23 |
24 | - name: Configure Git
25 | run: |
26 | git config user.name "$GITHUB_ACTOR"
27 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
28 |
29 | - name: Run chart-releaser
30 | uses: helm/chart-releaser-action@v1.6.0
31 | env:
32 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # If you prefer the allow list template instead of the deny list, see community template:
2 | # https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
3 | #
4 | # Binaries for programs and plugins
5 | *.exe
6 | *.exe~
7 | *.dll
8 | *.so
9 | *.dylib
10 |
11 | # Test binary, built with `go test -c`
12 | *.test
13 |
14 | # Output of the go coverage tool, specifically when used with LiteIDE
15 | *.out
16 |
17 | # Dependency directories (remove the comment below to include it)
18 | # vendor/
19 |
20 | # Go workspace file
21 | go.work
22 | go.work.sum
23 |
24 | # env file
25 | .env
26 | /bin/
27 | /bin/controller-gen
28 | /bin/controller-gen-v0.15.0
29 | /config/samples/ts_v1alpha1_typesensecluster_blueprints.yaml
30 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | run:
2 | timeout: 5m
3 | allow-parallel-runners: true
4 |
5 | issues:
6 | # don't skip warning about doc comments
7 | # don't exclude the default set of lint
8 | exclude-use-default: false
9 | # restore some of the defaults
10 | # (fill in the rest as needed)
11 | exclude-rules:
12 | - path: "api/*"
13 | linters:
14 | - lll
15 | - path: "internal/*"
16 | linters:
17 | - dupl
18 | - lll
19 | linters:
20 | disable-all: true
21 | enable:
22 | - dupl
23 | - errcheck
24 | - exportloopref
25 | - ginkgolinter
26 | - goconst
27 | - gocyclo
28 | - gofmt
29 | - goimports
30 | - gosimple
31 | - govet
32 | - ineffassign
33 | - lll
34 | - misspell
35 | - nakedret
36 | - prealloc
37 | - revive
38 | - staticcheck
39 | - typecheck
40 | - unconvert
41 | - unparam
42 | - unused
43 |
44 | linters-settings:
45 | revive:
46 | rules:
47 | - name: comment-spacings
48 |
--------------------------------------------------------------------------------
/.idea/.gitignore:
--------------------------------------------------------------------------------
1 | # Default ignored files
2 | /shelf/
3 | /workspace.xml
4 | # Editor-based HTTP Client requests
5 | /httpRequests/
6 | # Datasource local storage ignored files
7 | /dataSources/
8 | /dataSources.local.xml
9 |
--------------------------------------------------------------------------------
/.idea/golinter.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
28 |
29 |
--------------------------------------------------------------------------------
/.idea/modules.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/.idea/typesense-operator.iml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/.idea/vcs.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
--------------------------------------------------------------------------------
/.mirrord/mirrord.json:
--------------------------------------------------------------------------------
1 | {
2 | "feature": {
3 | "network": {
4 | "incoming": "mirror",
5 | "outgoing": true
6 | },
7 | "fs": "read",
8 | "env": true
9 | }
10 | }
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build the manager binary
2 | FROM golang:1.22 AS builder
3 | ARG TARGETOS
4 | ARG TARGETARCH
5 |
6 | WORKDIR /workspace
7 | # Copy the Go Modules manifests
8 | COPY go.mod go.mod
9 | COPY go.sum go.sum
10 | # cache deps before building and copying source so that we don't need to re-download as much
11 | # and so that source changes don't invalidate our downloaded layer
12 | RUN go mod download
13 |
14 | # Copy the go source
15 | COPY cmd/main.go cmd/main.go
16 | COPY api/ api/
17 | COPY internal/controller/ internal/controller/
18 |
19 | # Build
20 | # the GOARCH has not a default value to allow the binary be built according to the host where the command
21 | # was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO
22 | # the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore,
23 | # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform.
24 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go
25 |
26 | # Use distroless as minimal base image to package the manager binary
27 | # Refer to https://github.com/GoogleContainerTools/distroless for more details
28 | FROM gcr.io/distroless/static:nonroot
29 | WORKDIR /
30 | COPY --from=builder /workspace/manager .
31 | USER 65532:65532
32 |
33 | ENTRYPOINT ["/manager"]
34 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # VERSION defines the project version for the bundle.
2 | # Update this value when you upgrade the version of your project.
3 | # To re-generate a bundle for another specific version without changing the standard setup, you can:
4 | # - use the VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2)
5 | # - use environment variables to overwrite this value (e.g export VERSION=0.0.2)
6 | VERSION ?= 0.0.1
7 |
8 | # CHANNELS define the bundle channels used in the bundle.
9 | # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable")
10 | # To re-generate a bundle for other specific channels without changing the standard setup, you can:
11 | # - use the CHANNELS as arg of the bundle target (e.g make bundle CHANNELS=candidate,fast,stable)
12 | # - use environment variables to overwrite this value (e.g export CHANNELS="candidate,fast,stable")
13 | ifneq ($(origin CHANNELS), undefined)
14 | BUNDLE_CHANNELS := --channels=$(CHANNELS)
15 | endif
16 |
17 | # DEFAULT_CHANNEL defines the default channel used in the bundle.
18 | # Add a new line here if you would like to change its default config. (E.g DEFAULT_CHANNEL = "stable")
19 | # To re-generate a bundle for any other default channel without changing the default setup, you can:
20 | # - use the DEFAULT_CHANNEL as arg of the bundle target (e.g make bundle DEFAULT_CHANNEL=stable)
21 | # - use environment variables to overwrite this value (e.g export DEFAULT_CHANNEL="stable")
22 | ifneq ($(origin DEFAULT_CHANNEL), undefined)
23 | BUNDLE_DEFAULT_CHANNEL := --default-channel=$(DEFAULT_CHANNEL)
24 | endif
25 | BUNDLE_METADATA_OPTS ?= $(BUNDLE_CHANNELS) $(BUNDLE_DEFAULT_CHANNEL)
26 |
27 | # IMAGE_TAG_BASE defines the docker.io namespace and part of the image name for remote images.
28 | # This variable is used to construct full image tags for bundle and catalog images.
29 | #
30 | # For example, running 'make bundle-build bundle-push catalog-build catalog-push' will build and push both
31 | # opentelekomcloud.com/typesense-operator-bundle:$VERSION and opentelekomcloud.com/typesense-operator-catalog:$VERSION.
32 | IMAGE_TAG_BASE ?= akyriako78/typesense-operator
33 |
34 | # BUNDLE_IMG defines the image:tag used for the bundle.
35 | # You can use it as an arg. (E.g make bundle-build BUNDLE_IMG=/:)
36 | BUNDLE_IMG ?= $(IMAGE_TAG_BASE)-bundle:v$(VERSION)
37 |
38 | # BUNDLE_GEN_FLAGS are the flags passed to the operator-sdk generate bundle command
39 | BUNDLE_GEN_FLAGS ?= -q --overwrite --version $(VERSION) $(BUNDLE_METADATA_OPTS)
40 |
41 | # USE_IMAGE_DIGESTS defines if images are resolved via tags or digests
42 | # You can enable this value if you would like to use SHA Based Digests
43 | # To enable set flag to true
44 | USE_IMAGE_DIGESTS ?= false
45 | ifeq ($(USE_IMAGE_DIGESTS), true)
46 | BUNDLE_GEN_FLAGS += --use-image-digests
47 | endif
48 |
49 | # Set the Operator SDK version to use. By default, what is installed on the system is used.
50 | # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit.
51 | OPERATOR_SDK_VERSION ?= v1.38.0
52 | # Image URL to use all building/pushing image targets
53 | DOCKER_HUB_NAME ?= $(shell docker info | sed '/Username:/!d;s/.* //')
54 | IMG_NAME ?= typesense-operator
55 | IMG_TAG ?= 0.3.0
56 | IMG ?= $(DOCKER_HUB_NAME)/$(IMG_NAME):$(IMG_TAG)
57 |
58 | # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
59 | ENVTEST_K8S_VERSION = 1.30.0
60 |
61 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
62 | ifeq (,$(shell go env GOBIN))
63 | GOBIN=$(shell go env GOPATH)/bin
64 | else
65 | GOBIN=$(shell go env GOBIN)
66 | endif
67 |
68 | # CONTAINER_TOOL defines the container tool to be used for building images.
69 | # Be aware that the target commands are only tested with Docker which is
70 | # scaffolded by default. However, you might want to replace it to use other
71 | # tools. (i.e. podman)
72 | CONTAINER_TOOL ?= docker
73 |
74 | # Setting SHELL to bash allows bash commands to be executed by recipes.
75 | # Options are set to exit when a recipe line exits non-zero or a piped command fails.
76 | SHELL = /usr/bin/env bash -o pipefail
77 | .SHELLFLAGS = -ec
78 |
79 | .PHONY: all
80 | all: build
81 |
82 | ##@ General
83 |
84 | # The help target prints out all targets with their descriptions organized
85 | # beneath their categories. The categories are represented by '##@' and the
86 | # target descriptions by '##'. The awk command is responsible for reading the
87 | # entire set of makefiles included in this invocation, looking for lines of the
88 | # file as xyz: ## something, and then pretty-format the target and help. Then,
89 | # if there's a line with ##@ something, that gets pretty-printed as a category.
90 | # More info on the usage of ANSI control characters for terminal formatting:
91 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
92 | # More info on the awk command:
93 | # http://linuxcommand.org/lc3_adv_awk.php
94 |
95 | .PHONY: help
96 | help: ## Display this help.
97 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
98 |
99 | ##@ Development
100 |
101 | .PHONY: manifests
102 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
103 | $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
104 |
105 | .PHONY: generate
106 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
107 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
108 |
109 | .PHONY: fmt
110 | fmt: ## Run go fmt against code.
111 | go fmt ./...
112 |
113 | .PHONY: vet
114 | vet: ## Run go vet against code.
115 | go vet ./...
116 |
117 | .PHONY: test
118 | test: manifests generate fmt vet envtest ## Run tests.
119 | KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out
120 |
121 | # Utilize Kind or modify the e2e tests to load the image locally, enabling compatibility with other vendors.
122 | .PHONY: test-e2e # Run the e2e tests against a Kind k8s instance that is spun up.
123 | test-e2e:
124 | go test ./test/e2e/ -v -ginkgo.v
125 |
126 | .PHONY: lint
127 | lint: golangci-lint ## Run golangci-lint linter
128 | $(GOLANGCI_LINT) run
129 |
130 | .PHONY: lint-fix
131 | lint-fix: golangci-lint ## Run golangci-lint linter and perform fixes
132 | $(GOLANGCI_LINT) run --fix
133 |
134 | ##@ Build
135 |
136 | .PHONY: build
137 | build: manifests generate fmt vet ## Build manager binary.
138 | go build -o bin/manager cmd/main.go
139 |
140 | .PHONY: run
141 | run: manifests generate fmt vet ## Run a controller from your host.
142 | go run ./cmd/main.go
143 |
144 | # If you wish to build the manager image targeting other platforms you can use the --platform flag.
145 | # (i.e. docker build --platform linux/arm64). However, you must enable docker buildKit for it.
146 | # More info: https://docs.docker.com/develop/develop-images/build_enhancements/
147 | .PHONY: docker-build
148 | docker-build: ## Build docker image with the manager.
149 | $(CONTAINER_TOOL) build -t ${IMG} .
150 |
151 | .PHONY: docker-push
152 | docker-push: ## Push docker image with the manager.
153 | $(CONTAINER_TOOL) push ${IMG}
154 |
155 | # PLATFORMS defines the target platforms for the manager image be built to provide support to multiple
156 | # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to:
157 | # - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
158 | # - have enabled BuildKit. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
159 | # - be able to push the image to your registry (i.e. if you do not set a valid value via IMG=> then the export will fail)
160 | # To adequately provide solutions that are compatible with multiple platforms, you should consider using this option.
161 | PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
162 | .PHONY: docker-buildx
163 | docker-buildx: ## Build and push docker image for the manager for cross-platform support
164 | # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
165 | sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
166 | - $(CONTAINER_TOOL) buildx create --name typesense-operator-builder
167 | $(CONTAINER_TOOL) buildx use typesense-operator-builder
168 | - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross .
169 | - $(CONTAINER_TOOL) buildx rm typesense-operator-builder
170 | rm Dockerfile.cross
171 |
172 | .PHONY: build-installer
173 | build-installer: manifests generate kustomize ## Generate a consolidated YAML with CRDs and deployment.
174 | mkdir -p dist
175 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
176 | $(KUSTOMIZE) build config/default > dist/install.yaml
177 |
178 | ##@ Deployment
179 |
180 | ifndef ignore-not-found
181 | ignore-not-found = false
182 | endif
183 |
184 | .PHONY: install
185 | install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
186 | $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f -
187 |
188 | .PHONY: uninstall
189 | uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
190 | $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
191 |
192 | .PHONY: deploy
193 | deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
194 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
195 | $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f -
196 |
197 | .PHONY: undeploy
198 | undeploy: kustomize ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
199 | $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f -
200 |
201 | .PHONY: deploy-with-samples
202 | deploy-with-samples: kustomize generate manifests install ## Install CRDs and apply the kind sample TypesenseCluster to the cluster in ~/.kube/config.
203 | $(KUBECTL) apply -f config/samples/ts_v1alpha1_typesensecluster_kind.yaml
204 |
205 | .PHONY: samples
206 | samples: kustomize ## Apply the kind sample TypesenseCluster to the cluster in ~/.kube/config.
207 | $(KUBECTL) apply -f config/samples/ts_v1alpha1_typesensecluster_kind.yaml
208 |
209 | ##@ Dependencies
210 |
211 | ## Location to install dependencies to
212 | LOCALBIN ?= $(shell pwd)/bin
213 | $(LOCALBIN):
214 | mkdir -p $(LOCALBIN)
215 |
216 | ## Tool Binaries
217 | KUBECTL ?= kubectl
218 | KUSTOMIZE ?= $(LOCALBIN)/kustomize
219 | CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
220 | ENVTEST ?= $(LOCALBIN)/setup-envtest
221 | GOLANGCI_LINT = $(LOCALBIN)/golangci-lint
222 |
223 | ## Tool Versions
224 | KUSTOMIZE_VERSION ?= v5.4.2
225 | CONTROLLER_TOOLS_VERSION ?= v0.15.0
226 | ENVTEST_VERSION ?= release-0.18
227 | GOLANGCI_LINT_VERSION ?= v1.59.1
228 |
229 | .PHONY: kustomize
230 | kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
231 | $(KUSTOMIZE): $(LOCALBIN)
232 | $(call go-install-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v5,$(KUSTOMIZE_VERSION))
233 |
234 | .PHONY: controller-gen
235 | controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
236 | $(CONTROLLER_GEN): $(LOCALBIN)
237 | $(call go-install-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen,$(CONTROLLER_TOOLS_VERSION))
238 |
239 | .PHONY: envtest
240 | envtest: $(ENVTEST) ## Download setup-envtest locally if necessary.
241 | $(ENVTEST): $(LOCALBIN)
242 | $(call go-install-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest,$(ENVTEST_VERSION))
243 |
244 | .PHONY: golangci-lint
245 | golangci-lint: $(GOLANGCI_LINT) ## Download golangci-lint locally if necessary.
246 | $(GOLANGCI_LINT): $(LOCALBIN)
247 | $(call go-install-tool,$(GOLANGCI_LINT),github.com/golangci/golangci-lint/cmd/golangci-lint,$(GOLANGCI_LINT_VERSION))
248 |
249 | # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
250 | # $1 - target path with name of binary
251 | # $2 - package url which can be installed
252 | # $3 - specific version of package
253 | define go-install-tool
254 | @[ -f "$(1)-$(3)" ] || { \
255 | set -e; \
256 | package=$(2)@$(3) ;\
257 | echo "Downloading $${package}" ;\
258 | rm -f $(1) || true ;\
259 | GOBIN=$(LOCALBIN) go install $${package} ;\
260 | mv $(1) $(1)-$(3) ;\
261 | } ;\
262 | ln -sf $(1)-$(3) $(1)
263 | endef
264 |
265 | .PHONY: operator-sdk
266 | OPERATOR_SDK ?= $(LOCALBIN)/operator-sdk
267 | operator-sdk: ## Download operator-sdk locally if necessary.
268 | ifeq (,$(wildcard $(OPERATOR_SDK)))
269 | ifeq (, $(shell which operator-sdk 2>/dev/null))
270 | @{ \
271 | set -e ;\
272 | mkdir -p $(dir $(OPERATOR_SDK)) ;\
273 | OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
274 | curl -sSLo $(OPERATOR_SDK) https://github.com/operator-framework/operator-sdk/releases/download/$(OPERATOR_SDK_VERSION)/operator-sdk_$${OS}_$${ARCH} ;\
275 | chmod +x $(OPERATOR_SDK) ;\
276 | }
277 | else
278 | OPERATOR_SDK = $(shell which operator-sdk)
279 | endif
280 | endif
281 |
282 | .PHONY: bundle
283 | bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata, then validate generated files.
284 | $(OPERATOR_SDK) generate kustomize manifests -q
285 | cd config/manager && $(KUSTOMIZE) edit set image controller=$(IMG)
286 | $(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS)
287 | $(OPERATOR_SDK) bundle validate ./bundle
288 |
289 | .PHONY: bundle-build
290 | bundle-build: ## Build the bundle image.
291 | docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
292 |
293 | .PHONY: bundle-push
294 | bundle-push: ## Push the bundle image.
295 | $(MAKE) docker-push IMG=$(BUNDLE_IMG)
296 |
297 | .PHONY: opm
298 | OPM = $(LOCALBIN)/opm
299 | opm: ## Download opm locally if necessary.
300 | ifeq (,$(wildcard $(OPM)))
301 | ifeq (,$(shell which opm 2>/dev/null))
302 | @{ \
303 | set -e ;\
304 | mkdir -p $(dir $(OPM)) ;\
305 | OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
306 | curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\
307 | chmod +x $(OPM) ;\
308 | }
309 | else
310 | OPM = $(shell which opm)
311 | endif
312 | endif
313 |
314 | # A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
315 | # These images MUST exist in a registry and be pull-able.
316 | BUNDLE_IMGS ?= $(BUNDLE_IMG)
317 |
318 | # The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
319 | CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
320 |
321 | # Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
322 | ifneq ($(origin CATALOG_BASE_IMG), undefined)
323 | FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
324 | endif
325 |
326 | # Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
327 | # This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
328 | # https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
329 | .PHONY: catalog-build
330 | catalog-build: opm ## Build a catalog image.
331 | $(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
332 |
333 | # Push the catalog image.
334 | .PHONY: catalog-push
335 | catalog-push: ## Push a catalog image.
336 | $(MAKE) docker-push IMG=$(CATALOG_IMG)
337 |
338 | HELMIFY ?= $(LOCALBIN)/helmify
339 |
340 | .PHONY: helmify
341 | helmify: $(HELMIFY) ## Download helmify locally if necessary.
342 | $(HELMIFY): $(LOCALBIN)
343 | test -s $(LOCALBIN)/helmify || GOBIN=$(LOCALBIN) go install github.com/arttor/helmify/cmd/helmify@latest
344 |
345 | helm: manifests kustomize helmify
346 | $(KUSTOMIZE) build config/default | $(HELMIFY) -image-pull-secrets charts/typesense-operator
347 | sed -i -e 's|^appVersion:.*|appVersion: "$(IMG_TAG)"|' -e 's|^version:.*|version: $(IMG_TAG)|' ./charts/typesense-operator/Chart.yaml
348 |
349 |
--------------------------------------------------------------------------------
/PROJECT:
--------------------------------------------------------------------------------
1 | # Code generated by tool. DO NOT EDIT.
2 | # This file is used to track the info used to scaffold your project
3 | # and allow the plugins properly work.
4 | # More info: https://book.kubebuilder.io/reference/project-config.html
5 | domain: opentelekomcloud.com
6 | layout:
7 | - go.kubebuilder.io/v4
8 | plugins:
9 | manifests.sdk.operatorframework.io/v2: {}
10 | scorecard.sdk.operatorframework.io/v2: {}
11 | projectName: typesense-operator
12 | repo: github.com/akyriako/typesense-operator
13 | resources:
14 | - api:
15 | crdVersion: v1
16 | namespaced: true
17 | controller: true
18 | domain: opentelekomcloud.com
19 | group: ts
20 | kind: TypesenseCluster
21 | path: github.com/akyriako/typesense-operator/api/v1alpha1
22 | version: v1alpha1
23 | version: "3"
24 |
--------------------------------------------------------------------------------
/api/v1alpha1/groupversion_info.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | // Package v1alpha1 contains API Schema definitions for the ts v1alpha1 API group
18 | // +kubebuilder:object:generate=true
19 | // +groupName=ts.opentelekomcloud.com
20 | package v1alpha1
21 |
22 | import (
23 | "k8s.io/apimachinery/pkg/runtime/schema"
24 | "sigs.k8s.io/controller-runtime/pkg/scheme"
25 | )
26 |
 27 | var (
 28 | // GroupVersion is the group/version (ts.opentelekomcloud.com, v1alpha1) used to register these objects.
 29 | GroupVersion = schema.GroupVersion{Group: "ts.opentelekomcloud.com", Version: "v1alpha1"}
 30 |
 31 | // SchemeBuilder is used to add Go types in this package to the GroupVersionKind scheme.
 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
 33 |
 34 | // AddToScheme adds the types in this group-version to the given scheme.
 35 | AddToScheme = SchemeBuilder.AddToScheme
 36 | )
37 |
--------------------------------------------------------------------------------
/api/v1alpha1/typesensecluster_types.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package v1alpha1
18 |
19 | import (
20 | corev1 "k8s.io/api/core/v1"
21 | networkingv1 "k8s.io/api/networking/v1"
22 | "k8s.io/apimachinery/pkg/api/resource"
23 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
24 | )
25 |
26 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
27 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
28 |
 29 | // TypesenseClusterSpec defines the desired state of TypesenseCluster
 30 | type TypesenseClusterSpec struct {
// Image is the Typesense container image to deploy (required: the json tag has no omitempty).
 31 | Image string `json:"image"`
 32 |
// AdminApiKey optionally references a Secret holding the admin API key;
// NOTE(review): when nil, the operator presumably generates one
// (see typesensecluster_secret.go) — confirm against the controller.
 33 | AdminApiKey *corev1.SecretReference `json:"adminApiKey,omitempty"`
 34 |
 35 | // +optional
 36 | // +kubebuilder:default=3
 37 | // +kubebuilder:validation:Minimum=1
 38 | // +kubebuilder:validation:Maximum=7
 39 | // +kubebuilder:validation:ExclusiveMinimum=false
 40 | // +kubebuilder:validation:ExclusiveMaximum=false
 41 | // +kubebuilder:validation:Type=integer
 42 | // +kubebuilder:validation:Enum=1;3;5;7
// Replicas is the cluster size; the Enum marker restricts it to the odd
// values 1, 3, 5 or 7 (quorum-friendly sizes), defaulting to 3.
 43 | Replicas int32 `json:"replicas,omitempty"`
 44 |
 45 | // +optional
 46 | // +kubebuilder:default=8108
 47 | // +kubebuilder:validation:Minimum=1024
 48 | // +kubebuilder:validation:Maximum=65535
 49 | // +kubebuilder:validation:ExclusiveMinimum=true
 50 | // +kubebuilder:validation:ExclusiveMaximum=false
 51 | // +kubebuilder:validation:Type=integer
// ApiPort is the HTTP API port (default 8108). NOTE(review):
// ExclusiveMinimum=true makes 1024 itself invalid (allowed range is
// 1025-65535) — confirm that excluding 1024 is intentional.
 52 | ApiPort int `json:"apiPort,omitempty"`
 53 |
 54 | // +optional
 55 | // +kubebuilder:default=8107
 56 | // +kubebuilder:validation:Minimum=1024
 57 | // +kubebuilder:validation:Maximum=65535
 58 | // +kubebuilder:validation:ExclusiveMinimum=true
 59 | // +kubebuilder:validation:ExclusiveMaximum=false
 60 | // +kubebuilder:validation:Type=integer
// PeeringPort is the node-to-node peering port (default 8107); same
// exclusive lower bound as ApiPort (1025-65535).
 61 | PeeringPort int `json:"peeringPort,omitempty"`
 62 |
 63 | // +optional
 64 | // +kubebuilder:default=true
 65 | // +kubebuilder:validation:Type=boolean
// ResetPeersOnError defaults to true; presumably maps to the Typesense
// --reset-peers-on-error server flag — TODO confirm.
 66 | ResetPeersOnError bool `json:"resetPeersOnError,omitempty"`
 67 |
 68 | // +optional
 69 | // +kubebuilder:default=false
 70 | // +kubebuilder:validation:Type=boolean
// EnableCors toggles CORS support (default false); see CorsDomains below.
 71 | EnableCors bool `json:"enableCors,omitempty"`
 72 |
 73 | // +optional
 74 | // +kubebuilder:validation:Type=string
// CorsDomains optionally restricts CORS to specific domains; only
// meaningful when EnableCors is true.
 75 | CorsDomains *string `json:"corsDomains,omitempty"`
 76 |
// Resources optionally overrides container resource requests/limits.
 77 | Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
 78 |
 79 | // +kubebuilder:validation:Optional
// Affinity is passed through to the pods' scheduling affinity rules.
 80 | Affinity *corev1.Affinity `json:"affinity,omitempty"`
 81 |
 82 | // +kubebuilder:validation:Optional
// NodeSelector constrains pods to nodes matching these labels.
 83 | NodeSelector map[string]string `json:"nodeSelector,omitempty"`
 84 |
 85 | // +kubebuilder:validation:Optional
// Tolerations are passed through to the pod spec.
 86 | Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
 87 |
 88 | // +kubebuilder:validation:Optional
// AdditionalServerConfiguration references a ConfigMap with extra
// Typesense server settings — see typesensecluster_configmap.go.
 89 | AdditionalServerConfiguration *corev1.LocalObjectReference `json:"additionalServerConfiguration,omitempty"`
 90 |
// Storage configures persistent storage (required: no omitempty tag,
// though the field is a pointer — a nil value would bypass validation).
 91 | Storage *StorageSpec `json:"storage"`
 92 |
// Ingress optionally exposes the API via an Ingress resource.
 93 | Ingress *IngressSpec `json:"ingress,omitempty"`
 94 |
// Scrapers optionally configures DocSearch scraper jobs.
 95 | Scrapers []DocSearchScraperSpec `json:"scrapers,omitempty"`
 96 |
// Metrics optionally enables the metrics exporter.
 97 | Metrics *MetricsExporterSpec `json:"metrics,omitempty"`
 98 |
// HealthCheck optionally tunes health-check behavior.
 99 | HealthCheck *HealthCheckSpec `json:"healthcheck,omitempty"`
 100 |
 101 | // +kubebuilder:validation:Optional
// TopologySpreadConstraints are passed through to the pod spec.
 102 | TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
 103 |
 104 | // +optional
 105 | // +kubebuilder:default=false
 106 | // +kubebuilder:validation:Type=boolean
// IncrementalQuorumRecovery (default false) presumably switches the quorum
// recovery strategy in typesensecluster_quorum.go — TODO confirm semantics.
 107 | IncrementalQuorumRecovery bool `json:"incrementalQuorumRecovery,omitempty"`
 108 | }
109 |
// StorageSpec describes the persistent volume claimed by each Typesense node.
type StorageSpec struct {

	// Size of the data volume (default 100Mi).
	// +optional
	// +kubebuilder:default="100Mi"
	Size resource.Quantity `json:"size,omitempty"`

	// StorageClassName is the storage class used for the volume claim (required).
	StorageClassName string `json:"storageClassName"`
}
118 |
// IngressSpec configures external exposure of the Typesense API through an
// ingress backed by a reverse-proxy container (nginx by default).
type IngressSpec struct {
	// Referer optionally restricts access by referer host name (validated as a
	// DNS-1123 style hostname).
	// +optional
	// +kubebuilder:validation:Pattern:=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
	Referer *string `json:"referer,omitempty"`

	// Host is the fully qualified host name served by the ingress (required).
	// +kubebuilder:validation:Required
	// +kubebuilder:validation:Pattern:=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
	Host string `json:"host"`

	// Raw directives — presumably injected verbatim into the matching
	// http/server/location scopes of the proxy configuration; confirm in the
	// controller's reverse-proxy templating.
	HttpDirectives *string `json:"httpDirectives,omitempty"`
	ServerDirectives *string `json:"serverDirectives,omitempty"`
	LocationDirectives *string `json:"locationDirectives,omitempty"`

	// ClusterIssuer optionally names an issuer — presumably a cert-manager
	// ClusterIssuer for TLS certificates; confirm against the controller.
	// +optional
	ClusterIssuer *string `json:"clusterIssuer,omitempty"`

	// IngressClassName selects the ingress controller class (required).
	IngressClassName string `json:"ingressClassName"`

	// Annotations to apply — presumably copied onto the generated Ingress.
	Annotations map[string]string `json:"annotations,omitempty"`

	// TLSSecretName optionally names an existing TLS secret.
	// +optional
	TLSSecretName *string `json:"tlsSecretName,omitempty"`

	// Resources for the reverse-proxy container; GetReverseProxyResources
	// supplies defaults when nil.
	// +kubebuilder:validation:Optional
	Resources *corev1.ResourceRequirements `json:"resources,omitempty"`

	// Image of the reverse proxy (default "nginx:alpine").
	// +kubebuilder:validation:Optional
	// +kubebuilder:default:="nginx:alpine"
	Image string `json:"image,omitempty"`

	// ReadOnlyRootFilesystem optionally hardens the proxy container with a
	// custom security context plus extra volumes/mounts.
	// +optional
	ReadOnlyRootFilesystem *ReadOnlyRootFilesystemSpec `json:"readOnlyRootFilesystem,omitempty"`

	// Path served by the ingress rule (default "/").
	// +optional
	// +kubebuilder:default:="/"
	Path string `json:"path,omitempty"`

	// PathType of the ingress rule (default ImplementationSpecific).
	// +optional
	// +kubebuilder:default:="ImplementationSpecific"
	// +kubebuilder:validation:Enum=Exact;Prefix;ImplementationSpecific
	PathType *networkingv1.PathType `json:"pathType,omitempty"`
}
161 |
// ReadOnlyRootFilesystemSpec bundles the security context and the extra
// volumes/mounts needed when a container runs with a read-only root filesystem.
type ReadOnlyRootFilesystemSpec struct {
	// SecurityContext applied to the container.
	// +optional
	SecurityContext *corev1.SecurityContext `json:"securityContext,omitempty"`

	// Volumes added to the pod (e.g. writable scratch space).
	// +optional
	Volumes []corev1.Volume `json:"volumes,omitempty"`

	// VolumeMounts added to the container.
	// +optional
	VolumeMounts []corev1.VolumeMount `json:"volumeMounts,omitempty"`
}
172 |
// DocSearchScraperSpec describes a scheduled DocSearch scraper job.
type DocSearchScraperSpec struct {
	// Name identifies the scraper.
	Name string `json:"name"`
	// Image is the scraper container image.
	Image string `json:"image"`
	// Config is the scraper configuration payload.
	Config string `json:"config"`

	// Schedule is a five-field cron expression; the Pattern marker also accepts
	// the @annually/@yearly/@monthly/@weekly/@daily/@hourly/@reboot shortcuts.
	// +kubebuilder:validation:Pattern:=`(^((\*\/)?([0-5]?[0-9])((\,|\-|\/)([0-5]?[0-9]))*|\*)\s+((\*\/)?((2[0-3]|1[0-9]|[0-9]|00))((\,|\-|\/)(2[0-3]|1[0-9]|[0-9]|00))*|\*)\s+((\*\/)?([1-9]|[12][0-9]|3[01])((\,|\-|\/)([1-9]|[12][0-9]|3[01]))*|\*)\s+((\*\/)?([1-9]|1[0-2])((\,|\-|\/)([1-9]|1[0-2]))*|\*|(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|des))\s+((\*\/)?[0-6]((\,|\-|\/)[0-6])*|\*|00|(sun|mon|tue|wed|thu|fri|sat))\s*$)|@(annually|yearly|monthly|weekly|daily|hourly|reboot)`
	// +kubebuilder:validation:Type=string
	Schedule string `json:"schedule"`

	// AuthConfiguration optionally references a Secret injected as environment
	// variables (see GetScraperAuthConfiguration).
	// +kubebuilder:validation:Optional
	AuthConfiguration *corev1.LocalObjectReference `json:"authConfiguration,omitempty"`
}
185 |
// MetricsExporterSpec configures the Prometheus metrics exporter.
type MetricsExporterSpec struct {
	// Release names the monitoring (Prometheus) release this exporter is
	// associated with (required); defaults to "promstack" when the whole spec
	// is omitted (see GetMetricsExporterSpecs).
	Release string `json:"release"`

	// Image of the exporter (defaults to the published exporter image).
	// +kubebuilder:validation:Optional
	// +kubebuilder:default:="akyriako78/typesense-prometheus-exporter:0.1.7"
	Image string `json:"image,omitempty"`

	// IntervalInSeconds is the scrape interval, validated to 15-60 seconds
	// (default 15).
	// +optional
	// +kubebuilder:default=15
	// +kubebuilder:validation:Minimum=15
	// +kubebuilder:validation:Maximum=60
	// +kubebuilder:validation:ExclusiveMinimum=false
	// +kubebuilder:validation:ExclusiveMaximum=false
	// +kubebuilder:validation:Type=integer
	IntervalInSeconds int `json:"interval,omitempty"`

	// Resources for the exporter container; GetMetricsExporterResources
	// supplies defaults when nil.
	// +kubebuilder:validation:Optional
	Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
}
205 |
// HealthCheckSpec configures the healthcheck sidecar container.
type HealthCheckSpec struct {
	// Image of the healthcheck sidecar (defaults to the published image).
	// +kubebuilder:validation:Optional
	// +kubebuilder:default:="akyriako78/typesense-healthcheck:0.1.7"
	Image string `json:"image,omitempty"`

	// Resources for the sidecar; GetHealthCheckSidecarResources supplies
	// defaults when nil.
	// +kubebuilder:validation:Optional
	Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
}
214 |
// TypesenseClusterStatus defines the observed state of TypesenseCluster
type TypesenseClusterStatus struct {

	// Conditions holds the latest observed conditions, merged by condition
	// type on strategic-merge patches.
	// +optional
	// +operator-sdk:csv:customresourcedefinitions:type=status,xDescriptors={"urn:alm:descriptor:io.kubernetes.conditions"}
	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`

	// Phase is a human-readable summary of the cluster state, surfaced via the
	// "Phase" printer column on the CRD.
	// +optional
	Phase string `json:"phase,omitempty"`
}
225 |
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status

// TypesenseCluster is the Schema for the typesenseclusters API
// +kubebuilder:printcolumn:name="Image",type=string,JSONPath=`.spec.image`
// +kubebuilder:printcolumn:name="Replicas",type=integer,JSONPath=`.spec.replicas`
// +kubebuilder:printcolumn:name="API Port",type=integer,JSONPath=`.spec.apiPort`
// +kubebuilder:printcolumn:name="Peering Port",type=integer,JSONPath=`.spec.peeringPort`
// +kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase`
// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type==\"Ready\")].status"
type TypesenseCluster struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	// Spec is the desired cluster configuration.
	Spec TypesenseClusterSpec `json:"spec,omitempty"`
	// Status is the last observed state, written by the controller.
	Status TypesenseClusterStatus `json:"status,omitempty"`
}
243 |
// +kubebuilder:object:root=true

// TypesenseClusterList contains a list of TypesenseCluster
type TypesenseClusterList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// Items is the list of clusters returned by the API.
	Items []TypesenseCluster `json:"items"`
}
252 |
// init registers TypesenseCluster and TypesenseClusterList with the scheme
// builder so clients and the manager can encode/decode these API types.
func init() {
	SchemeBuilder.Register(&TypesenseCluster{}, &TypesenseClusterList{})
}
256 |
--------------------------------------------------------------------------------
/api/v1alpha1/typesensecluster_types_helpers.go:
--------------------------------------------------------------------------------
1 | package v1alpha1
2 |
3 | import (
4 | corev1 "k8s.io/api/core/v1"
5 | "k8s.io/apimachinery/pkg/api/resource"
6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
7 | )
8 |
9 | func (s *TypesenseClusterSpec) GetResources() corev1.ResourceRequirements {
10 | if s.Resources != nil {
11 | return *s.Resources
12 | }
13 |
14 | return corev1.ResourceRequirements{
15 | Limits: corev1.ResourceList{
16 | corev1.ResourceCPU: resource.MustParse("1000m"),
17 | corev1.ResourceMemory: resource.MustParse("512Mi"),
18 | },
19 | Requests: corev1.ResourceList{
20 | corev1.ResourceCPU: resource.MustParse("100m"),
21 | corev1.ResourceMemory: resource.MustParse("256Mi"),
22 | },
23 | }
24 | }
25 |
26 | func (s *TypesenseClusterSpec) GetAdditionalServerConfiguration() []corev1.EnvFromSource {
27 | if s.AdditionalServerConfiguration != nil {
28 | return []corev1.EnvFromSource{
29 | {
30 | ConfigMapRef: &corev1.ConfigMapEnvSource{
31 | LocalObjectReference: *s.AdditionalServerConfiguration,
32 | },
33 | },
34 | }
35 | }
36 |
37 | return []corev1.EnvFromSource{}
38 | }
39 |
40 | func (s *DocSearchScraperSpec) GetScraperAuthConfiguration() []corev1.EnvFromSource {
41 | if s.AuthConfiguration != nil {
42 | return []corev1.EnvFromSource{
43 | {
44 | SecretRef: &corev1.SecretEnvSource{
45 | LocalObjectReference: *s.AuthConfiguration,
46 | },
47 | },
48 | }
49 | }
50 |
51 | return []corev1.EnvFromSource{}
52 | }
53 |
54 | func (s *TypesenseClusterSpec) GetCorsDomains() string {
55 | if s.CorsDomains == nil {
56 | return ""
57 | }
58 | return *s.CorsDomains
59 | }
60 |
61 | func (s *TypesenseClusterSpec) GetStorage() StorageSpec {
62 | if s.Storage != nil {
63 | return *s.Storage
64 | }
65 |
66 | return StorageSpec{
67 | Size: resource.MustParse("100Mi"),
68 | StorageClassName: "standard",
69 | }
70 | }
71 |
72 | func (s *TypesenseClusterSpec) GetTopologySpreadConstraints(labels map[string]string) []corev1.TopologySpreadConstraint {
73 | tscs := make([]corev1.TopologySpreadConstraint, 0)
74 |
75 | for _, tsc := range s.TopologySpreadConstraints {
76 | if tsc.LabelSelector == nil {
77 | tsc.LabelSelector = &metav1.LabelSelector{
78 | MatchLabels: labels,
79 | }
80 | }
81 | tscs = append(tscs, tsc)
82 | }
83 | return tscs
84 | }
85 |
86 | func (s *TypesenseClusterSpec) GetMetricsExporterSpecs() MetricsExporterSpec {
87 | if s.Metrics != nil {
88 | return *s.Metrics
89 | }
90 |
91 | return MetricsExporterSpec{
92 | Release: "promstack",
93 | Image: "akyriako78/typesense-prometheus-exporter:0.1.7",
94 | IntervalInSeconds: 15,
95 | }
96 | }
97 |
98 | func (s *TypesenseClusterSpec) GetMetricsExporterResources() corev1.ResourceRequirements {
99 | if s.Metrics != nil && s.Metrics.Resources != nil {
100 | return *s.Metrics.Resources
101 | }
102 |
103 | return corev1.ResourceRequirements{
104 | Limits: corev1.ResourceList{
105 | corev1.ResourceCPU: resource.MustParse("100m"),
106 | corev1.ResourceMemory: resource.MustParse("64Mi"),
107 | },
108 | Requests: corev1.ResourceList{
109 | corev1.ResourceCPU: resource.MustParse("100m"),
110 | corev1.ResourceMemory: resource.MustParse("32Mi"),
111 | },
112 | }
113 | }
114 |
115 | func (s *TypesenseClusterSpec) GetHealthCheckSidecarSpecs() HealthCheckSpec {
116 | if s.HealthCheck != nil {
117 | return *s.HealthCheck
118 | }
119 |
120 | return HealthCheckSpec{
121 | Image: "akyriako78/typesense-healthcheck:0.1.7",
122 | }
123 | }
124 |
125 | func (s *TypesenseClusterSpec) GetHealthCheckSidecarResources() corev1.ResourceRequirements {
126 | if s.HealthCheck != nil && s.HealthCheck.Resources != nil {
127 | return *s.HealthCheck.Resources
128 | }
129 |
130 | return corev1.ResourceRequirements{
131 | Limits: corev1.ResourceList{
132 | corev1.ResourceCPU: resource.MustParse("100m"),
133 | corev1.ResourceMemory: resource.MustParse("64Mi"),
134 | },
135 | Requests: corev1.ResourceList{
136 | corev1.ResourceCPU: resource.MustParse("100m"),
137 | corev1.ResourceMemory: resource.MustParse("32Mi"),
138 | },
139 | }
140 | }
141 |
142 | func (s *IngressSpec) GetReverseProxyResources() corev1.ResourceRequirements {
143 | if s.Resources != nil {
144 | return *s.Resources
145 | }
146 |
147 | return corev1.ResourceRequirements{
148 | Limits: corev1.ResourceList{
149 | corev1.ResourceCPU: resource.MustParse("150m"),
150 | corev1.ResourceMemory: resource.MustParse("64Mi"),
151 | },
152 | Requests: corev1.ResourceList{
153 | corev1.ResourceCPU: resource.MustParse("100m"),
154 | corev1.ResourceMemory: resource.MustParse("32Mi"),
155 | },
156 | }
157 | }
158 |
--------------------------------------------------------------------------------
/api/v1alpha1/zz_generated.deepcopy.go:
--------------------------------------------------------------------------------
1 | //go:build !ignore_autogenerated
2 |
3 | /*
4 | Copyright 2024.
5 |
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | you may not use this file except in compliance with the License.
8 | You may obtain a copy of the License at
9 |
10 | http://www.apache.org/licenses/LICENSE-2.0
11 |
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 | */
18 |
19 | // Code generated by controller-gen. DO NOT EDIT.
20 |
21 | package v1alpha1
22 |
23 | import (
24 | "k8s.io/api/core/v1"
25 | networkingv1 "k8s.io/api/networking/v1"
26 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
27 | runtime "k8s.io/apimachinery/pkg/runtime"
28 | )
29 |
// NOTE(review): everything below is emitted by controller-gen (`make generate`);
// do not hand-edit — change the API types and regenerate instead.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DocSearchScraperSpec) DeepCopyInto(out *DocSearchScraperSpec) {
	*out = *in
	if in.AuthConfiguration != nil {
		in, out := &in.AuthConfiguration, &out.AuthConfiguration
		*out = new(v1.LocalObjectReference)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DocSearchScraperSpec.
func (in *DocSearchScraperSpec) DeepCopy() *DocSearchScraperSpec {
	if in == nil {
		return nil
	}
	out := new(DocSearchScraperSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HealthCheckSpec) DeepCopyInto(out *HealthCheckSpec) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HealthCheckSpec.
func (in *HealthCheckSpec) DeepCopy() *HealthCheckSpec {
	if in == nil {
		return nil
	}
	out := new(HealthCheckSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) {
	*out = *in
	if in.Referer != nil {
		in, out := &in.Referer, &out.Referer
		*out = new(string)
		**out = **in
	}
	if in.HttpDirectives != nil {
		in, out := &in.HttpDirectives, &out.HttpDirectives
		*out = new(string)
		**out = **in
	}
	if in.ServerDirectives != nil {
		in, out := &in.ServerDirectives, &out.ServerDirectives
		*out = new(string)
		**out = **in
	}
	if in.LocationDirectives != nil {
		in, out := &in.LocationDirectives, &out.LocationDirectives
		*out = new(string)
		**out = **in
	}
	if in.ClusterIssuer != nil {
		in, out := &in.ClusterIssuer, &out.ClusterIssuer
		*out = new(string)
		**out = **in
	}
	if in.Annotations != nil {
		in, out := &in.Annotations, &out.Annotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.TLSSecretName != nil {
		in, out := &in.TLSSecretName, &out.TLSSecretName
		*out = new(string)
		**out = **in
	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	if in.ReadOnlyRootFilesystem != nil {
		in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem
		*out = new(ReadOnlyRootFilesystemSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.PathType != nil {
		in, out := &in.PathType, &out.PathType
		*out = new(networkingv1.PathType)
		**out = **in
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec.
func (in *IngressSpec) DeepCopy() *IngressSpec {
	if in == nil {
		return nil
	}
	out := new(IngressSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsExporterSpec) DeepCopyInto(out *MetricsExporterSpec) {
	*out = *in
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsExporterSpec.
func (in *MetricsExporterSpec) DeepCopy() *MetricsExporterSpec {
	if in == nil {
		return nil
	}
	out := new(MetricsExporterSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ReadOnlyRootFilesystemSpec) DeepCopyInto(out *ReadOnlyRootFilesystemSpec) {
	*out = *in
	if in.SecurityContext != nil {
		in, out := &in.SecurityContext, &out.SecurityContext
		*out = new(v1.SecurityContext)
		(*in).DeepCopyInto(*out)
	}
	if in.Volumes != nil {
		in, out := &in.Volumes, &out.Volumes
		*out = make([]v1.Volume, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.VolumeMounts != nil {
		in, out := &in.VolumeMounts, &out.VolumeMounts
		*out = make([]v1.VolumeMount, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReadOnlyRootFilesystemSpec.
func (in *ReadOnlyRootFilesystemSpec) DeepCopy() *ReadOnlyRootFilesystemSpec {
	if in == nil {
		return nil
	}
	out := new(ReadOnlyRootFilesystemSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StorageSpec) DeepCopyInto(out *StorageSpec) {
	*out = *in
	out.Size = in.Size.DeepCopy()
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageSpec.
func (in *StorageSpec) DeepCopy() *StorageSpec {
	if in == nil {
		return nil
	}
	out := new(StorageSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypesenseCluster) DeepCopyInto(out *TypesenseCluster) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypesenseCluster.
func (in *TypesenseCluster) DeepCopy() *TypesenseCluster {
	if in == nil {
		return nil
	}
	out := new(TypesenseCluster)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TypesenseCluster) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypesenseClusterList) DeepCopyInto(out *TypesenseClusterList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]TypesenseCluster, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypesenseClusterList.
func (in *TypesenseClusterList) DeepCopy() *TypesenseClusterList {
	if in == nil {
		return nil
	}
	out := new(TypesenseClusterList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *TypesenseClusterList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypesenseClusterSpec) DeepCopyInto(out *TypesenseClusterSpec) {
	*out = *in
	if in.AdminApiKey != nil {
		in, out := &in.AdminApiKey, &out.AdminApiKey
		*out = new(v1.SecretReference)
		**out = **in
	}
	if in.CorsDomains != nil {
		in, out := &in.CorsDomains, &out.CorsDomains
		*out = new(string)
		**out = **in
	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = new(v1.ResourceRequirements)
		(*in).DeepCopyInto(*out)
	}
	if in.Affinity != nil {
		in, out := &in.Affinity, &out.Affinity
		*out = new(v1.Affinity)
		(*in).DeepCopyInto(*out)
	}
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	if in.Tolerations != nil {
		in, out := &in.Tolerations, &out.Tolerations
		*out = make([]v1.Toleration, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.AdditionalServerConfiguration != nil {
		in, out := &in.AdditionalServerConfiguration, &out.AdditionalServerConfiguration
		*out = new(v1.LocalObjectReference)
		**out = **in
	}
	if in.Storage != nil {
		in, out := &in.Storage, &out.Storage
		*out = new(StorageSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Ingress != nil {
		in, out := &in.Ingress, &out.Ingress
		*out = new(IngressSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.Scrapers != nil {
		in, out := &in.Scrapers, &out.Scrapers
		*out = make([]DocSearchScraperSpec, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.Metrics != nil {
		in, out := &in.Metrics, &out.Metrics
		*out = new(MetricsExporterSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.HealthCheck != nil {
		in, out := &in.HealthCheck, &out.HealthCheck
		*out = new(HealthCheckSpec)
		(*in).DeepCopyInto(*out)
	}
	if in.TopologySpreadConstraints != nil {
		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
		*out = make([]v1.TopologySpreadConstraint, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypesenseClusterSpec.
func (in *TypesenseClusterSpec) DeepCopy() *TypesenseClusterSpec {
	if in == nil {
		return nil
	}
	out := new(TypesenseClusterSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TypesenseClusterStatus) DeepCopyInto(out *TypesenseClusterStatus) {
	*out = *in
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]metav1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypesenseClusterStatus.
func (in *TypesenseClusterStatus) DeepCopy() *TypesenseClusterStatus {
	if in == nil {
		return nil
	}
	out := new(TypesenseClusterStatus)
	in.DeepCopyInto(out)
	return out
}
375 |
--------------------------------------------------------------------------------
/charts/typesense-operator/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
--------------------------------------------------------------------------------
/charts/typesense-operator/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: typesense-operator
3 | description: Manage the lifecycle of Typesense Clusters in Kubernetes
4 | # A chart can be either an 'application' or a 'library' chart.
5 | #
6 | # Application charts are a collection of templates that can be packaged into versioned archives
7 | # to be deployed.
8 | #
9 | # Library charts provide useful utilities or functions for the chart developer. They're included as
10 | # a dependency of application charts to inject those utilities and functions into the rendering
11 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
12 | type: application
13 | # This is the chart version. This version number should be incremented each time you make changes
14 | # to the chart and its templates, including the app version.
15 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
16 | version: 0.3.0
17 | # This is the version number of the application being deployed. This version number should be
18 | # incremented each time you make changes to the application. Versions are not expected to
19 | # follow Semantic Versioning. They should reflect the version the application is using.
20 | # It is recommended to use it with quotes.
21 | appVersion: "0.3.0"
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/_helpers.tpl:
--------------------------------------------------------------------------------
{{/*
Expand the name of the chart.
Precedence: .Values.nameOverride, then .Chart.Name; truncated to 63 characters
to respect the Kubernetes DNS label length limit.
*/}}
{{- define "typesense-operator.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
Can be overridden entirely with .Values.fullnameOverride.
*/}}
{{- define "typesense-operator.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}

{{/*
Create chart name and version as used by the chart label.
"+" is not a valid label character, so it is replaced with "_".
*/}}
{{- define "typesense-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}

{{/*
Common labels applied to every chart resource: chart name/version, app
version, managed-by, plus the selector labels below.
*/}}
{{- define "typesense-operator.labels" -}}
helm.sh/chart: {{ include "typesense-operator.chart" . }}
{{ include "typesense-operator.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}

{{/*
Selector labels — the stable subset used in matchLabels; these must not
change between releases of the chart.
*/}}
{{- define "typesense-operator.selectorLabels" -}}
app.kubernetes.io/name: {{ include "typesense-operator.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}

{{/*
Create the name of the service account to use.
When serviceAccount.create is false this falls back to "default" unless an
explicit serviceAccount.name is provided.
*/}}
{{- define "typesense-operator.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "typesense-operator.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}
63 |
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/deployment.yaml:
--------------------------------------------------------------------------------
# Deployment of the typesense-operator controller manager.
# Replicas, image, args, resources and security contexts all come from
# .Values.controllerManager; the cluster DNS domain is passed via env and the
# health/readiness probes target the manager's health endpoint on port 8081.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "typesense-operator.fullname" . }}-controller-manager
  labels:
    control-plane: controller-manager
  {{- include "typesense-operator.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.controllerManager.replicas }}
  selector:
    matchLabels:
      control-plane: controller-manager
    {{- include "typesense-operator.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      labels:
        control-plane: controller-manager
      {{- include "typesense-operator.selectorLabels" . | nindent 8 }}
      annotations:
        kubectl.kubernetes.io/default-container: manager
    spec:
      containers:
      - args: {{- toYaml .Values.controllerManager.manager.args | nindent 8 }}
        command:
        - /manager
        env:
        - name: KUBERNETES_CLUSTER_DOMAIN
          value: {{ quote .Values.kubernetesClusterDomain }}
        # Image tag falls back to the chart's appVersion when not set in values.
        image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag
          | default .Chart.AppVersion }}
        imagePullPolicy: {{ .Values.controllerManager.manager.imagePullPolicy }}
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8081
          initialDelaySeconds: 15
          periodSeconds: 20
        name: manager
        readinessProbe:
          httpGet:
            path: /readyz
            port: 8081
          initialDelaySeconds: 5
          periodSeconds: 10
        resources: {{- toYaml .Values.controllerManager.manager.resources | nindent 10
          }}
        securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext
          | nindent 10 }}
      imagePullSecrets: {{ .Values.imagePullSecrets | default list | toJson }}
      securityContext: {{- toYaml .Values.controllerManager.podSecurityContext | nindent
        8 }}
      serviceAccountName: {{ include "typesense-operator.fullname" . }}-controller-manager
      terminationGracePeriodSeconds: 10
/charts/typesense-operator/templates/leader-election-rbac.yaml:
--------------------------------------------------------------------------------
# Namespaced Role granting the controller manager the permissions required for
# leader election: configmaps, coordination.k8s.io leases, and event emission.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: {{ include "typesense-operator.fullname" . }}-leader-election-role
  labels:
  {{- include "typesense-operator.labels" . | nindent 4 }}
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
---
# Binds the leader-election Role to the controller manager's ServiceAccount.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: {{ include "typesense-operator.fullname" . }}-leader-election-rolebinding
  labels:
  {{- include "typesense-operator.labels" . | nindent 4 }}
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: '{{ include "typesense-operator.fullname" . }}-leader-election-role'
subjects:
- kind: ServiceAccount
  name: '{{ include "typesense-operator.fullname" . }}-controller-manager'
  namespace: '{{ .Release.Namespace }}'
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/manager-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: {{ include "typesense-operator.fullname" . }}-manager-role
5 | labels:
6 | {{- include "typesense-operator.labels" . | nindent 4 }}
7 | rules:
8 | - apiGroups:
9 | - ""
10 | resources:
11 | - configmaps
12 | verbs:
13 | - create
14 | - delete
15 | - get
16 | - list
17 | - patch
18 | - update
19 | - watch
20 | - apiGroups:
21 | - ""
22 | resources:
23 | - pods
24 | verbs:
25 | - delete
26 | - get
27 | - list
28 | - patch
29 | - update
30 | - watch
31 | - apiGroups:
32 | - ""
33 | resources:
34 | - pods/status
35 | verbs:
36 | - get
37 | - patch
38 | - update
39 | - apiGroups:
40 | - ""
41 | resources:
42 | - secrets
43 | verbs:
44 | - create
45 | - delete
46 | - get
47 | - list
48 | - patch
49 | - update
50 | - watch
51 | - apiGroups:
52 | - ""
53 | resources:
54 | - services
55 | verbs:
56 | - create
57 | - delete
58 | - get
59 | - list
60 | - patch
61 | - update
62 | - watch
63 | - apiGroups:
64 | - apps
65 | resources:
66 | - deployments
67 | verbs:
68 | - create
69 | - delete
70 | - get
71 | - list
72 | - patch
73 | - update
74 | - watch
75 | - apiGroups:
76 | - apps
77 | resources:
78 | - statefulsets
79 | verbs:
80 | - create
81 | - delete
82 | - get
83 | - list
84 | - patch
85 | - update
86 | - watch
87 | - apiGroups:
88 | - batch
89 | resources:
90 | - cronjobs
91 | verbs:
92 | - create
93 | - delete
94 | - get
95 | - list
96 | - patch
97 | - update
98 | - watch
99 | - apiGroups:
100 | - ""
101 | resources:
102 | - events
103 | verbs:
104 | - create
105 | - patch
106 | - apiGroups:
107 | - discovery.k8s.io
108 | resources:
109 | - endpointslices
110 | verbs:
111 | - get
112 | - list
113 | - watch
114 | - apiGroups:
115 | - monitoring.coreos.com
116 | resources:
117 | - podmonitors
118 | verbs:
119 | - create
120 | - delete
121 | - get
122 | - list
123 | - patch
124 | - update
125 | - watch
126 | - apiGroups:
127 | - monitoring.coreos.com
128 | resources:
129 | - servicemonitors
130 | verbs:
131 | - create
132 | - delete
133 | - get
134 | - list
135 | - patch
136 | - update
137 | - watch
138 | - apiGroups:
139 | - networking.k8s.io
140 | resources:
141 | - ingresses
142 | verbs:
143 | - create
144 | - delete
145 | - get
146 | - list
147 | - patch
148 | - update
149 | - watch
150 | - apiGroups:
151 | - ts.opentelekomcloud.com
152 | resources:
153 | - typesenseclusters
154 | verbs:
155 | - create
156 | - delete
157 | - get
158 | - list
159 | - patch
160 | - update
161 | - watch
162 | - apiGroups:
163 | - ts.opentelekomcloud.com
164 | resources:
165 | - typesenseclusters/finalizers
166 | verbs:
167 | - update
168 | - apiGroups:
169 | - ts.opentelekomcloud.com
170 | resources:
171 | - typesenseclusters/status
172 | verbs:
173 | - get
174 | - patch
175 | - update
176 | ---
177 | apiVersion: rbac.authorization.k8s.io/v1
178 | kind: ClusterRoleBinding
179 | metadata:
180 | name: {{ include "typesense-operator.fullname" . }}-manager-rolebinding
181 | labels:
182 | {{- include "typesense-operator.labels" . | nindent 4 }}
183 | roleRef:
184 | apiGroup: rbac.authorization.k8s.io
185 | kind: ClusterRole
186 | name: '{{ include "typesense-operator.fullname" . }}-manager-role'
187 | subjects:
188 | - kind: ServiceAccount
189 | name: '{{ include "typesense-operator.fullname" . }}-controller-manager'
190 | namespace: '{{ .Release.Namespace }}'
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/metrics-auth-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: {{ include "typesense-operator.fullname" . }}-metrics-auth-role
5 | labels:
6 | {{- include "typesense-operator.labels" . | nindent 4 }}
7 | rules:
8 | - apiGroups:
9 | - authentication.k8s.io
10 | resources:
11 | - tokenreviews
12 | verbs:
13 | - create
14 | - apiGroups:
15 | - authorization.k8s.io
16 | resources:
17 | - subjectaccessreviews
18 | verbs:
19 | - create
20 | ---
21 | apiVersion: rbac.authorization.k8s.io/v1
22 | kind: ClusterRoleBinding
23 | metadata:
24 | name: {{ include "typesense-operator.fullname" . }}-metrics-auth-rolebinding
25 | labels:
26 | {{- include "typesense-operator.labels" . | nindent 4 }}
27 | roleRef:
28 | apiGroup: rbac.authorization.k8s.io
29 | kind: ClusterRole
30 | name: '{{ include "typesense-operator.fullname" . }}-metrics-auth-role'
31 | subjects:
32 | - kind: ServiceAccount
33 | name: '{{ include "typesense-operator.fullname" . }}-controller-manager'
34 | namespace: '{{ .Release.Namespace }}'
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/metrics-reader-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: {{ include "typesense-operator.fullname" . }}-metrics-reader
5 | labels:
6 | {{- include "typesense-operator.labels" . | nindent 4 }}
7 | rules:
8 | - nonResourceURLs:
9 | - /metrics
10 | verbs:
11 | - get
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/metrics-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ include "typesense-operator.fullname" . }}-controller-manager-metrics-service
5 | labels:
6 | control-plane: controller-manager
7 | {{- include "typesense-operator.labels" . | nindent 4 }}
8 | spec:
9 | type: {{ .Values.metricsService.type }}
10 | selector:
11 | control-plane: controller-manager
12 | {{- include "typesense-operator.selectorLabels" . | nindent 4 }}
13 | ports:
14 | {{- .Values.metricsService.ports | toYaml | nindent 2 }}
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: {{ include "typesense-operator.fullname" . }}-controller-manager
5 | labels:
6 | {{- include "typesense-operator.labels" . | nindent 4 }}
7 | annotations:
8 | {{- toYaml .Values.controllerManager.serviceAccount.annotations | nindent 4 }}
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/typesensecluster-editor-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: {{ include "typesense-operator.fullname" . }}-typesensecluster-editor-role
5 | labels:
6 | {{- include "typesense-operator.labels" . | nindent 4 }}
7 | rules:
8 | - apiGroups:
9 | - ts.opentelekomcloud.com
10 | resources:
11 | - typesenseclusters
12 | verbs:
13 | - create
14 | - delete
15 | - get
16 | - list
17 | - patch
18 | - update
19 | - watch
20 | - apiGroups:
21 | - ts.opentelekomcloud.com
22 | resources:
23 | - typesenseclusters/status
24 | verbs:
25 | - get
--------------------------------------------------------------------------------
/charts/typesense-operator/templates/typesensecluster-viewer-rbac.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: {{ include "typesense-operator.fullname" . }}-typesensecluster-viewer-role
5 | labels:
6 | {{- include "typesense-operator.labels" . | nindent 4 }}
7 | rules:
8 | - apiGroups:
9 | - ts.opentelekomcloud.com
10 | resources:
11 | - typesenseclusters
12 | verbs:
13 | - get
14 | - list
15 | - watch
16 | - apiGroups:
17 | - ts.opentelekomcloud.com
18 | resources:
19 | - typesenseclusters/status
20 | verbs:
21 | - get
--------------------------------------------------------------------------------
/charts/typesense-operator/values.yaml:
--------------------------------------------------------------------------------
1 | controllerManager:
2 | manager:
3 | args:
4 | - --metrics-bind-address=:8443
5 | - --leader-elect
6 | - --health-probe-bind-address=:8081
7 | - --zap-log-level=debug
8 | containerSecurityContext:
9 | allowPrivilegeEscalation: false
10 | capabilities:
11 | drop:
12 | - ALL
13 | image:
14 | repository: akyriako78/typesense-operator
15 | tag: 0.3.0
16 | imagePullPolicy: IfNotPresent
17 | resources:
18 | limits:
19 | cpu: 500m
20 | memory: 256Mi
21 | requests:
22 | cpu: 10m
23 | memory: 64Mi
24 | podSecurityContext:
25 | runAsNonRoot: true
26 | replicas: 1
27 | serviceAccount:
28 | annotations: {}
29 | imagePullSecrets: []
30 | kubernetesClusterDomain: cluster.local
31 | metricsService:
32 | ports:
33 | - name: https
34 | port: 8443
35 | protocol: TCP
36 | targetPort: 8443
37 | type: ClusterIP
38 |
--------------------------------------------------------------------------------
/cmd/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package main
18 |
19 | import (
20 | "crypto/tls"
21 | "flag"
22 | "go.uber.org/zap/zapcore"
23 | "k8s.io/client-go/discovery"
24 | "os"
25 |
26 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
27 | // to ensure that exec-entrypoint and run can make use of them.
28 |
29 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
30 |
31 | _ "k8s.io/client-go/discovery"
32 | _ "k8s.io/client-go/plugin/pkg/client/auth"
33 |
34 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
35 | "github.com/akyriako/typesense-operator/internal/controller"
36 | discoveryv1 "k8s.io/api/discovery/v1"
37 | "k8s.io/apimachinery/pkg/runtime"
38 | utilruntime "k8s.io/apimachinery/pkg/util/runtime"
39 | clientgoscheme "k8s.io/client-go/kubernetes/scheme"
40 | ctrl "sigs.k8s.io/controller-runtime"
41 | "sigs.k8s.io/controller-runtime/pkg/healthz"
42 | "sigs.k8s.io/controller-runtime/pkg/log/zap"
43 | "sigs.k8s.io/controller-runtime/pkg/metrics/filters"
44 | metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
45 | "sigs.k8s.io/controller-runtime/pkg/webhook"
46 | // +kubebuilder:scaffold:imports
47 | )
48 |
49 | var (
50 | scheme = runtime.NewScheme()
51 | setupLog = ctrl.Log.WithName("setup")
52 | )
53 |
54 | func init() {
55 | utilruntime.Must(clientgoscheme.AddToScheme(scheme))
56 |
57 | utilruntime.Must(monitoringv1.AddToScheme(scheme))
58 | utilruntime.Must(discoveryv1.AddToScheme(scheme))
59 | // +kubebuilder:scaffold:scheme
60 |
61 | utilruntime.Must(tsv1alpha1.AddToScheme(scheme))
62 | // +kubebuilder:scaffold:scheme
63 | }
64 |
65 | func main() {
66 | var metricsAddr string
67 | var enableLeaderElection bool
68 | var probeAddr string
69 | var secureMetrics bool
70 | var enableHTTP2 bool
71 | var tlsOpts []func(*tls.Config)
72 | flag.StringVar(&metricsAddr, "metrics-bind-address", "0", "The address the metrics endpoint binds to. "+
73 | "Use :8443 for HTTPS or :8080 for HTTP, or leave as 0 to disable the metrics service.")
74 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
75 | flag.BoolVar(&enableLeaderElection, "leader-elect", false,
76 | "Enable leader election for controller manager. "+
77 | "Enabling this will ensure there is only one active controller manager.")
78 | flag.BoolVar(&secureMetrics, "metrics-secure", true,
79 | "If set, the metrics endpoint is served securely via HTTPS. Use --metrics-secure=false to use HTTP instead.")
80 | flag.BoolVar(&enableHTTP2, "enable-http2", false,
81 | "If set, HTTP/2 will be enabled for the metrics and webhook servers")
82 |
83 | opts := zap.Options{
84 | Development: true,
85 | TimeEncoder: zapcore.ISO8601TimeEncoder,
86 | StacktraceLevel: zapcore.DPanicLevel,
87 | }
88 |
89 | opts.BindFlags(flag.CommandLine)
90 | flag.Parse()
91 |
92 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
93 |
94 | // if the enable-http2 flag is false (the default), http/2 should be disabled
95 | // due to its vulnerabilities. More specifically, disabling http/2 will
96 | // prevent from being vulnerable to the HTTP/2 Stream Cancellation and
97 | // Rapid Reset CVEs. For more information see:
98 | // - https://github.com/advisories/GHSA-qppj-fm5r-hxr3
99 | // - https://github.com/advisories/GHSA-4374-p667-p6c8
100 | disableHTTP2 := func(c *tls.Config) {
101 | setupLog.Info("disabling http/2")
102 | c.NextProtos = []string{"http/1.1"}
103 | }
104 |
105 | if !enableHTTP2 {
106 | tlsOpts = append(tlsOpts, disableHTTP2)
107 | }
108 |
109 | webhookServer := webhook.NewServer(webhook.Options{
110 | TLSOpts: tlsOpts,
111 | })
112 |
113 | // Metrics endpoint is enabled in 'config/default/kustomization.yaml'. The Metrics options configure the server.
114 | // More info:
115 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/server
116 | // - https://book.kubebuilder.io/reference/metrics.html
117 | metricsServerOptions := metricsserver.Options{
118 | BindAddress: metricsAddr,
119 | SecureServing: secureMetrics,
120 | // TODO(user): TLSOpts is used to allow configuring the TLS config used for the server. If certificates are
121 | // not provided, self-signed certificates will be generated by default. This option is not recommended for
122 | // production environments as self-signed certificates do not offer the same level of trust and security
123 | // as certificates issued by a trusted Certificate Authority (CA). The primary risk is potentially allowing
124 | // unauthorized access to sensitive metrics data. Consider replacing with CertDir, CertName, and KeyName
125 | // to provide certificates, ensuring the server communicates using trusted and secure certificates.
126 | TLSOpts: tlsOpts,
127 | }
128 |
129 | if secureMetrics {
130 | // FilterProvider is used to protect the metrics endpoint with authn/authz.
131 | // These configurations ensure that only authorized users and service accounts
132 | // can access the metrics endpoint. The RBAC are configured in 'config/rbac/kustomization.yaml'. More info:
133 | // https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/metrics/filters#WithAuthenticationAndAuthorization
134 | metricsServerOptions.FilterProvider = filters.WithAuthenticationAndAuthorization
135 | }
136 |
137 | kubeConfig := ctrl.GetConfigOrDie()
138 | mgr, err := ctrl.NewManager(kubeConfig, ctrl.Options{
139 | Scheme: scheme,
140 | Metrics: metricsServerOptions,
141 | WebhookServer: webhookServer,
142 | HealthProbeBindAddress: probeAddr,
143 | LeaderElection: enableLeaderElection,
144 | LeaderElectionID: "9cf0818f.opentelekomcloud.com",
145 | // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily
146 | // when the Manager ends. This requires the binary to immediately end when the
147 | // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly
148 | // speeds up voluntary leader transitions as the new leader don't have to wait
149 | // LeaseDuration time first.
150 | //
151 | // In the default scaffold provided, the program ends immediately after
152 | // the manager stops, so would be fine to enable this option. However,
153 | // if you are doing or is intended to do any operation such as perform cleanups
154 | // after the manager stops then its usage might be unsafe.
155 | // LeaderElectionReleaseOnCancel: true,
156 | })
157 | if err != nil {
158 | setupLog.Error(err, "unable to start manager")
159 | os.Exit(1)
160 | }
161 |
162 | discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeConfig)
163 | if err != nil {
164 | setupLog.Error(err, "unable to create discovery client")
165 | }
166 |
167 | if err = (&controller.TypesenseClusterReconciler{
168 | Client: mgr.GetClient(),
169 | Scheme: mgr.GetScheme(),
170 | Recorder: mgr.GetEventRecorderFor("typesensecluster-controller"),
171 | DiscoveryClient: discoveryClient,
172 | }).SetupWithManager(mgr); err != nil {
173 | setupLog.Error(err, "unable to create controller", "controller", "TypesenseCluster")
174 | os.Exit(1)
175 | }
176 | // +kubebuilder:scaffold:builder
177 |
178 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
179 | setupLog.Error(err, "unable to set up health check")
180 | os.Exit(1)
181 | }
182 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
183 | setupLog.Error(err, "unable to set up ready check")
184 | os.Exit(1)
185 | }
186 |
187 | setupLog.Info("starting manager")
188 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
189 | setupLog.Error(err, "problem running manager")
190 | os.Exit(1)
191 | }
192 | }
193 |
--------------------------------------------------------------------------------
/config/crd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This kustomization.yaml is not intended to be run by itself,
2 | # since it depends on service name and namespace that are out of this kustomize package.
3 | # It should be run by config/default
4 | resources:
5 | - bases/ts.opentelekomcloud.com_typesenseclusters.yaml
6 | # +kubebuilder:scaffold:crdkustomizeresource
7 |
8 | patches:
9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
10 | # patches here are for enabling the conversion webhook for each CRD
11 | # +kubebuilder:scaffold:crdkustomizewebhookpatch
12 |
13 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix.
14 | # patches here are for enabling the CA injection for each CRD
15 | #- path: patches/cainjection_in_typesenseclusters.yaml
16 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch
17 |
18 | # [WEBHOOK] To enable webhook, uncomment the following section
19 | # the following config is for teaching kustomize how to do kustomization for CRDs.
20 |
21 | #configurations:
22 | #- kustomizeconfig.yaml
23 |
--------------------------------------------------------------------------------
/config/crd/kustomizeconfig.yaml:
--------------------------------------------------------------------------------
1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD
2 | nameReference:
3 | - kind: Service
4 | version: v1
5 | fieldSpecs:
6 | - kind: CustomResourceDefinition
7 | version: v1
8 | group: apiextensions.k8s.io
9 | path: spec/conversion/webhook/clientConfig/service/name
10 |
11 | namespace:
12 | - kind: CustomResourceDefinition
13 | version: v1
14 | group: apiextensions.k8s.io
15 | path: spec/conversion/webhook/clientConfig/service/namespace
16 | create: false
17 |
18 | varReference:
19 | - path: metadata/annotations
20 |
--------------------------------------------------------------------------------
/config/default/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # Adds namespace to all resources.
2 | namespace: typesense-operator-system
3 |
4 | # Value of this field is prepended to the
5 | # names of all resources, e.g. a deployment named
6 | # "wordpress" becomes "alices-wordpress".
7 | # Note that it should also match with the prefix (text before '-') of the namespace
8 | # field above.
9 | namePrefix: typesense-operator-
10 |
11 | # Labels to add to all resources and selectors.
12 | #labels:
13 | #- includeSelectors: true
14 | # pairs:
15 | # someName: someValue
16 |
17 | resources:
18 | - ../crd
19 | - ../rbac
20 | - ../manager
21 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
22 | # crd/kustomization.yaml
23 | #- ../webhook
24 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required.
25 | #- ../certmanager
26 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'.
27 | #- ../prometheus
28 | # [METRICS] Expose the controller manager metrics service.
29 | - metrics_service.yaml
30 |
31 | # Uncomment the patches line if you enable Metrics, and/or are using webhooks and cert-manager
32 | patches:
33 | # [METRICS] The following patch will enable the metrics endpoint using HTTPS and the port :8443.
34 | # More info: https://book.kubebuilder.io/reference/metrics
35 | - path: manager_metrics_patch.yaml
36 | target:
37 | kind: Deployment
38 |
39 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
40 | # crd/kustomization.yaml
41 | #- path: manager_webhook_patch.yaml
42 |
43 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
44 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
45 | # 'CERTMANAGER' needs to be enabled to use ca injection
46 | #- path: webhookcainjection_patch.yaml
47 |
48 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
49 | # Uncomment the following replacements to add the cert-manager CA injection annotations
50 | #replacements:
51 | # - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs
52 | # kind: Certificate
53 | # group: cert-manager.io
54 | # version: v1
55 | # name: serving-cert # this name should match the one in certificate.yaml
56 | # fieldPath: .metadata.namespace # namespace of the certificate CR
57 | # targets:
58 | # - select:
59 | # kind: ValidatingWebhookConfiguration
60 | # fieldPaths:
61 | # - .metadata.annotations.[cert-manager.io/inject-ca-from]
62 | # options:
63 | # delimiter: '/'
64 | # index: 0
65 | # create: true
66 | # - select:
67 | # kind: MutatingWebhookConfiguration
68 | # fieldPaths:
69 | # - .metadata.annotations.[cert-manager.io/inject-ca-from]
70 | # options:
71 | # delimiter: '/'
72 | # index: 0
73 | # create: true
74 | # - select:
75 | # kind: CustomResourceDefinition
76 | # fieldPaths:
77 | # - .metadata.annotations.[cert-manager.io/inject-ca-from]
78 | # options:
79 | # delimiter: '/'
80 | # index: 0
81 | # create: true
82 | # - source:
83 | # kind: Certificate
84 | # group: cert-manager.io
85 | # version: v1
86 | # name: serving-cert # this name should match the one in certificate.yaml
87 | # fieldPath: .metadata.name
88 | # targets:
89 | # - select:
90 | # kind: ValidatingWebhookConfiguration
91 | # fieldPaths:
92 | # - .metadata.annotations.[cert-manager.io/inject-ca-from]
93 | # options:
94 | # delimiter: '/'
95 | # index: 1
96 | # create: true
97 | # - select:
98 | # kind: MutatingWebhookConfiguration
99 | # fieldPaths:
100 | # - .metadata.annotations.[cert-manager.io/inject-ca-from]
101 | # options:
102 | # delimiter: '/'
103 | # index: 1
104 | # create: true
105 | # - select:
106 | # kind: CustomResourceDefinition
107 | # fieldPaths:
108 | # - .metadata.annotations.[cert-manager.io/inject-ca-from]
109 | # options:
110 | # delimiter: '/'
111 | # index: 1
112 | # create: true
113 | # - source: # Add cert-manager annotation to the webhook Service
114 | # kind: Service
115 | # version: v1
116 | # name: webhook-service
117 | # fieldPath: .metadata.name # namespace of the service
118 | # targets:
119 | # - select:
120 | # kind: Certificate
121 | # group: cert-manager.io
122 | # version: v1
123 | # fieldPaths:
124 | # - .spec.dnsNames.0
125 | # - .spec.dnsNames.1
126 | # options:
127 | # delimiter: '.'
128 | # index: 0
129 | # create: true
130 | # - source:
131 | # kind: Service
132 | # version: v1
133 | # name: webhook-service
134 | # fieldPath: .metadata.namespace # namespace of the service
135 | # targets:
136 | # - select:
137 | # kind: Certificate
138 | # group: cert-manager.io
139 | # version: v1
140 | # fieldPaths:
141 | # - .spec.dnsNames.0
142 | # - .spec.dnsNames.1
143 | # options:
144 | # delimiter: '.'
145 | # index: 1
146 | # create: true
147 |
--------------------------------------------------------------------------------
/config/default/manager_metrics_patch.yaml:
--------------------------------------------------------------------------------
1 | # This patch adds the args to allow exposing the metrics endpoint using HTTPS
2 | - op: add
3 | path: /spec/template/spec/containers/0/args/0
4 | value: --metrics-bind-address=:8443
5 |
--------------------------------------------------------------------------------
/config/default/metrics_service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | control-plane: controller-manager
6 | app.kubernetes.io/name: typesense-operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: controller-manager-metrics-service
9 | namespace: system
10 | spec:
11 | ports:
12 | - name: https
13 | port: 8443
14 | protocol: TCP
15 | targetPort: 8443
16 | selector:
17 | control-plane: controller-manager
18 |
--------------------------------------------------------------------------------
/config/manager/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - manager.yaml
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | images:
6 | - name: controller
7 | newName: akyriako78/typesense-operator
8 | newTag: 0.3.0
9 |
--------------------------------------------------------------------------------
/config/manager/manager.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | control-plane: controller-manager
6 | app.kubernetes.io/name: typesense-operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: system
9 | ---
10 | apiVersion: apps/v1
11 | kind: Deployment
12 | metadata:
13 | name: controller-manager
14 | namespace: system
15 | labels:
16 | control-plane: controller-manager
17 | app.kubernetes.io/name: typesense-operator
18 | app.kubernetes.io/managed-by: kustomize
19 | spec:
20 | selector:
21 | matchLabels:
22 | control-plane: controller-manager
23 | replicas: 1
24 | template:
25 | metadata:
26 | annotations:
27 | kubectl.kubernetes.io/default-container: manager
28 | labels:
29 | control-plane: controller-manager
30 | spec:
31 | # TODO(user): Uncomment the following code to configure the nodeAffinity expression
32 | # according to the platforms which are supported by your solution.
33 | # It is considered best practice to support multiple architectures. You can
34 | # build your manager image using the makefile target docker-buildx.
35 | # affinity:
36 | # nodeAffinity:
37 | # requiredDuringSchedulingIgnoredDuringExecution:
38 | # nodeSelectorTerms:
39 | # - matchExpressions:
40 | # - key: kubernetes.io/arch
41 | # operator: In
42 | # values:
43 | # - amd64
44 | # - arm64
45 | # - ppc64le
46 | # - s390x
47 | # - key: kubernetes.io/os
48 | # operator: In
49 | # values:
50 | # - linux
51 | securityContext:
52 | runAsNonRoot: true
53 | # TODO(user): For common cases that do not require escalating privileges
54 | # it is recommended to ensure that all your Pods/Containers are restrictive.
55 | # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted
56 | # Please uncomment the following code if your project does NOT have to work on old Kubernetes
57 | # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ).
58 | # seccompProfile:
59 | # type: RuntimeDefault
60 | containers:
61 | - command:
62 | - /manager
63 | args:
64 | - --leader-elect
65 | - --health-probe-bind-address=:8081
66 | - --zap-log-level=debug
67 | image: controller:latest
68 | imagePullPolicy: IfNotPresent
69 | name: manager
70 | securityContext:
71 | allowPrivilegeEscalation: false
72 | capabilities:
73 | drop:
74 | - "ALL"
75 | livenessProbe:
76 | httpGet:
77 | path: /healthz
78 | port: 8081
79 | initialDelaySeconds: 15
80 | periodSeconds: 20
81 | readinessProbe:
82 | httpGet:
83 | path: /readyz
84 | port: 8081
85 | initialDelaySeconds: 5
86 | periodSeconds: 10
87 | # TODO(user): Configure the resources accordingly based on the project requirements.
88 | # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
89 | resources:
90 | limits:
91 | cpu: 500m
92 | memory: 256Mi
93 | requests:
94 | cpu: 10m
95 | memory: 64Mi
96 | serviceAccountName: controller-manager
97 | terminationGracePeriodSeconds: 10
98 |
--------------------------------------------------------------------------------
/config/manifests/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # These resources constitute the fully configured set of manifests
2 | # used to generate the 'manifests/' directory in a bundle.
3 | resources:
4 | - bases/typesense-operator.clusterserviceversion.yaml
5 | - ../default
6 | - ../samples
7 | - ../scorecard
8 |
9 | # [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix.
10 | # Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager.
11 | # These patches remove the unnecessary "cert" volume and its manager container volumeMount.
12 | #patches:
13 | #- target:
14 | # group: apps
15 | # version: v1
16 | # kind: Deployment
17 | # name: controller-manager
18 | # namespace: system
19 | # patch: |-
20 | # # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs.
21 | # # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment.
22 | #  - op: remove
23 | #    path: /spec/template/spec/containers/0/volumeMounts/0
25 | # # Remove the "cert" volume, since OLM will create and mount a set of certs.
26 | # # Update the indices in this path if adding or removing volumes in the manager's Deployment.
27 | # - op: remove
28 | # path: /spec/template/spec/volumes/0
29 |
--------------------------------------------------------------------------------
/config/prometheus/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - monitor.yaml
3 |
--------------------------------------------------------------------------------
/config/prometheus/monitor.yaml:
--------------------------------------------------------------------------------
1 | # Prometheus Monitor Service (Metrics)
2 | apiVersion: monitoring.coreos.com/v1
3 | kind: ServiceMonitor
4 | metadata:
5 | labels:
6 | control-plane: controller-manager
7 | app.kubernetes.io/name: typesense-operator
8 | app.kubernetes.io/managed-by: kustomize
9 | name: controller-manager-metrics-monitor
10 | namespace: system
11 | spec:
12 | endpoints:
13 | - path: /metrics
14 | port: https # Ensure this is the name of the port that exposes HTTPS metrics
15 | scheme: https
16 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
17 | tlsConfig:
18 | # TODO(user): The option insecureSkipVerify: true is not recommended for production since it disables
19 | # certificate verification. This poses a significant security risk by making the system vulnerable to
20 | # man-in-the-middle attacks, where an attacker could intercept and manipulate the communication between
21 | # Prometheus and the monitored services. This could lead to unauthorized access to sensitive metrics data,
22 | # compromising the integrity and confidentiality of the information.
23 | # Please use the following options for secure configurations:
24 | # caFile: /etc/metrics-certs/ca.crt
25 | # certFile: /etc/metrics-certs/tls.crt
26 | # keyFile: /etc/metrics-certs/tls.key
27 | insecureSkipVerify: true
28 | selector:
29 | matchLabels:
30 | control-plane: controller-manager
31 |
--------------------------------------------------------------------------------
/config/rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | # All RBAC will be applied under this service account in
3 | # the deployment namespace. You may comment out this resource
4 | # if your manager will use a service account that exists at
5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding
6 | # subjects if changing service account names.
7 | - service_account.yaml
8 | - role.yaml
9 | - role_binding.yaml
10 | - leader_election_role.yaml
11 | - leader_election_role_binding.yaml
12 | # The following RBAC configurations are used to protect
13 | # the metrics endpoint with authn/authz. These configurations
14 | # ensure that only authorized users and service accounts
15 | # can access the metrics endpoint. Comment the following
16 | # permissions if you want to disable this protection.
17 | # More info: https://book.kubebuilder.io/reference/metrics.html
18 | - metrics_auth_role.yaml
19 | - metrics_auth_role_binding.yaml
20 | - metrics_reader_role.yaml
21 | # For each CRD, "Editor" and "Viewer" roles are scaffolded by
22 | # default, aiding admins in cluster management. Those roles are
23 | # not used by the Project itself. You can comment the following lines
24 | # if you do not want those helpers to be installed with your Project.
25 | - typesensecluster_editor_role.yaml
26 | - typesensecluster_viewer_role.yaml
27 |
28 |
--------------------------------------------------------------------------------
/config/rbac/leader_election_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions to do leader election.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: Role
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: typesense-operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: leader-election-role
9 | rules:
10 | - apiGroups:
11 | - ""
12 | resources:
13 | - configmaps
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | - create
19 | - update
20 | - patch
21 | - delete
22 | - apiGroups:
23 | - coordination.k8s.io
24 | resources:
25 | - leases
26 | verbs:
27 | - get
28 | - list
29 | - watch
30 | - create
31 | - update
32 | - patch
33 | - delete
34 | - apiGroups:
35 | - ""
36 | resources:
37 | - events
38 | verbs:
39 | - create
40 | - patch
41 |
--------------------------------------------------------------------------------
/config/rbac/leader_election_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: RoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: typesense-operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: leader-election-rolebinding
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: Role
11 | name: leader-election-role
12 | subjects:
13 | - kind: ServiceAccount
14 | name: controller-manager
15 | namespace: system
16 |
--------------------------------------------------------------------------------
/config/rbac/metrics_auth_role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: metrics-auth-role
5 | rules:
6 | - apiGroups:
7 | - authentication.k8s.io
8 | resources:
9 | - tokenreviews
10 | verbs:
11 | - create
12 | - apiGroups:
13 | - authorization.k8s.io
14 | resources:
15 | - subjectaccessreviews
16 | verbs:
17 | - create
18 |
--------------------------------------------------------------------------------
/config/rbac/metrics_auth_role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | name: metrics-auth-rolebinding
5 | roleRef:
6 | apiGroup: rbac.authorization.k8s.io
7 | kind: ClusterRole
8 | name: metrics-auth-role
9 | subjects:
10 | - kind: ServiceAccount
11 | name: controller-manager
12 | namespace: system
13 |
--------------------------------------------------------------------------------
/config/rbac/metrics_reader_role.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: metrics-reader
5 | rules:
6 | - nonResourceURLs:
7 | - "/metrics"
8 | verbs:
9 | - get
10 |
--------------------------------------------------------------------------------
/config/rbac/role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: manager-role
6 | rules:
7 | - apiGroups:
8 | - ""
9 | resources:
10 | - configmaps
11 | verbs:
12 | - create
13 | - delete
14 | - get
15 | - list
16 | - patch
17 | - update
18 | - watch
19 | - apiGroups:
20 | - ""
21 | resources:
22 | - pods
23 | verbs:
24 | - delete
25 | - get
26 | - list
27 | - patch
28 | - update
29 | - watch
30 | - apiGroups:
31 | - ""
32 | resources:
33 | - pods/status
34 | verbs:
35 | - get
36 | - patch
37 | - update
38 | - apiGroups:
39 | - ""
40 | resources:
41 | - secrets
42 | verbs:
43 | - create
44 | - delete
45 | - get
46 | - list
47 | - patch
48 | - update
49 | - watch
50 | - apiGroups:
51 | - ""
52 | resources:
53 | - services
54 | verbs:
55 | - create
56 | - delete
57 | - get
58 | - list
59 | - patch
60 | - update
61 | - watch
62 | - apiGroups:
63 | - apps
64 | resources:
65 | - deployments
66 | verbs:
67 | - create
68 | - delete
69 | - get
70 | - list
71 | - patch
72 | - update
73 | - watch
74 | - apiGroups:
75 | - apps
76 | resources:
77 | - statefulsets
78 | verbs:
79 | - create
80 | - delete
81 | - get
82 | - list
83 | - patch
84 | - update
85 | - watch
86 | - apiGroups:
87 | - batch
88 | resources:
89 | - cronjobs
90 | verbs:
91 | - create
92 | - delete
93 | - get
94 | - list
95 | - patch
96 | - update
97 | - watch
98 | - apiGroups:
99 | - ""
100 | resources:
101 | - events
102 | verbs:
103 | - create
104 | - patch
105 | - apiGroups:
106 | - discovery.k8s.io
107 | resources:
108 | - endpointslices
109 | verbs:
110 | - get
111 | - list
112 | - watch
113 | - apiGroups:
114 | - monitoring.coreos.com
115 | resources:
116 | - podmonitors
117 | verbs:
118 | - create
119 | - delete
120 | - get
121 | - list
122 | - patch
123 | - update
124 | - watch
125 | - apiGroups:
126 | - monitoring.coreos.com
127 | resources:
128 | - servicemonitors
129 | verbs:
130 | - create
131 | - delete
132 | - get
133 | - list
134 | - patch
135 | - update
136 | - watch
137 | - apiGroups:
138 | - networking.k8s.io
139 | resources:
140 | - ingresses
141 | verbs:
142 | - create
143 | - delete
144 | - get
145 | - list
146 | - patch
147 | - update
148 | - watch
149 | - apiGroups:
150 | - ts.opentelekomcloud.com
151 | resources:
152 | - typesenseclusters
153 | verbs:
154 | - create
155 | - delete
156 | - get
157 | - list
158 | - patch
159 | - update
160 | - watch
161 | - apiGroups:
162 | - ts.opentelekomcloud.com
163 | resources:
164 | - typesenseclusters/finalizers
165 | verbs:
166 | - update
167 | - apiGroups:
168 | - ts.opentelekomcloud.com
169 | resources:
170 | - typesenseclusters/status
171 | verbs:
172 | - get
173 | - patch
174 | - update
175 |
--------------------------------------------------------------------------------
/config/rbac/role_binding.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRoleBinding
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: typesense-operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: manager-rolebinding
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: ClusterRole
11 | name: manager-role
12 | subjects:
13 | - kind: ServiceAccount
14 | name: controller-manager
15 | namespace: system
16 |
--------------------------------------------------------------------------------
/config/rbac/service_account.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: typesense-operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: controller-manager
8 | namespace: system
9 |
--------------------------------------------------------------------------------
/config/rbac/typesensecluster_editor_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to edit typesenseclusters.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: typesense-operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: typesensecluster-editor-role
9 | rules:
10 | - apiGroups:
11 | - ts.opentelekomcloud.com
12 | resources:
13 | - typesenseclusters
14 | verbs:
15 | - create
16 | - delete
17 | - get
18 | - list
19 | - patch
20 | - update
21 | - watch
22 | - apiGroups:
23 | - ts.opentelekomcloud.com
24 | resources:
25 | - typesenseclusters/status
26 | verbs:
27 | - get
28 |
--------------------------------------------------------------------------------
/config/rbac/typesensecluster_viewer_role.yaml:
--------------------------------------------------------------------------------
1 | # permissions for end users to view typesenseclusters.
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: typesense-operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: typesensecluster-viewer-role
9 | rules:
10 | - apiGroups:
11 | - ts.opentelekomcloud.com
12 | resources:
13 | - typesenseclusters
14 | verbs:
15 | - get
16 | - list
17 | - watch
18 | - apiGroups:
19 | - ts.opentelekomcloud.com
20 | resources:
21 | - typesenseclusters/status
22 | verbs:
23 | - get
24 |
--------------------------------------------------------------------------------
/config/samples/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ## Append samples of your project ##
2 | resources:
3 | - ts_v1alpha1_typesensecluster_aws.yaml
4 | - ts_v1alpha1_typesensecluster_azure.yaml
5 | - ts_v1alpha1_typesensecluster_bm.yaml
6 | - ts_v1alpha1_typesensecluster_kind.yaml
7 | - ts_v1alpha1_typesensecluster_opentelekomcloud.yaml
8 | - ts_v1alpha1_typesensecluster.yaml
9 | # +kubebuilder:scaffold:manifestskustomizesamples
10 |
--------------------------------------------------------------------------------
/config/samples/ts_v1alpha1_typesensecluster.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: ts.opentelekomcloud.com/v1alpha1
2 | kind: TypesenseCluster
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: typesense-operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: cluster-1
8 | spec:
9 | image: typesense/typesense:27.1
10 | replicas: 3
11 | storage:
12 | size: 100Mi
13 | storageClassName: standard
--------------------------------------------------------------------------------
/config/samples/ts_v1alpha1_typesensecluster_aws.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: ts.opentelekomcloud.com/v1alpha1
2 | kind: TypesenseCluster
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: typesense-operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: c-aws-1
8 | spec:
9 | image: typesense/typesense:27.1
10 | replicas: 3
11 | storage:
12 | size: 100Mi
13 | storageClassName: gp2
14 | ---
15 | apiVersion: v1
16 | kind: Secret
17 | metadata:
18 | labels:
19 | app.kubernetes.io/name: typesense-operator
20 | app.kubernetes.io/managed-by: kustomize
21 | name: typesense-common-bootstrap-key
22 | type: Opaque
23 | data:
24 | typesense-api-key: SXdpVG9CcnFYTHZYeTJNMG1TS1hPaGt0dlFUY3VWUloxc1M5REtsRUNtMFFwQU93R1hoanVIVWJLQnE2ejdlSQ==
25 | ---
26 | apiVersion: ts.opentelekomcloud.com/v1alpha1
27 | kind: TypesenseCluster
28 | metadata:
29 | labels:
30 | app.kubernetes.io/name: typesense-operator
31 | app.kubernetes.io/managed-by: kustomize
32 | name: c-aws-2
33 | spec:
34 | image: typesense/typesense:27.1
35 | replicas: 1
36 | storage:
37 | size: 100Mi
38 | storageClassName: gp2
39 | adminApiKey:
40 | name: typesense-common-bootstrap-key
41 | scrapers:
42 | - name: docusaurus-example-com
43 | image: typesense/docsearch-scraper:0.11.0
44 | config: "{\"index_name\":\"docusaurus-example\",\"start_urls\":[\"https://docusaurus.example.com/\"],\"sitemap_urls\":[\"https://docusaurus.example.com/sitemap.xml\"],\"sitemap_alternate_links\":true,\"stop_urls\":[\"/tests\"],\"selectors\":{\"lvl0\":{\"selector\":\"(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]\",\"type\":\"xpath\",\"global\":true,\"default_value\":\"Documentation\"},\"lvl1\":\"header h1\",\"lvl2\":\"article h2\",\"lvl3\":\"article h3\",\"lvl4\":\"article h4\",\"lvl5\":\"article h5, article td:first-child\",\"lvl6\":\"article h6\",\"text\":\"article p, article li, article td:last-child\"},\"strip_chars\":\" .,;:#\",\"custom_settings\":{\"separatorsToIndex\":\"_\",\"attributesForFaceting\":[\"language\",\"version\",\"type\",\"docusaurus_tag\"],\"attributesToRetrieve\":[\"hierarchy\",\"content\",\"anchor\",\"url\",\"url_without_anchor\",\"type\"]},\"conversation_id\":[\"833762294\"],\"nb_hits\":46250}"
45 | schedule: '*/2 * * * *'
--------------------------------------------------------------------------------
/config/samples/ts_v1alpha1_typesensecluster_azure.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: ts.opentelekomcloud.com/v1alpha1
2 | kind: TypesenseCluster
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: typesense-operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: c-az-1
8 | spec:
9 | image: typesense/typesense:27.1
10 | replicas: 3
11 | storage:
12 | size: 10Mi
13 | storageClassName: managed-csi
14 | ---
15 | apiVersion: ts.opentelekomcloud.com/v1alpha1
16 | kind: TypesenseCluster
17 | metadata:
18 | labels:
19 | app.kubernetes.io/name: typesense-operator
20 | app.kubernetes.io/managed-by: kustomize
21 | name: c-az-2
22 | spec:
23 | image: typesense/typesense:26.0
24 | replicas: 1
25 | storage:
26 | storageClassName: managed-csi
--------------------------------------------------------------------------------
/config/samples/ts_v1alpha1_typesensecluster_bm.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: ts.opentelekomcloud.com/v1alpha1
2 | kind: TypesenseCluster
3 | metadata:
4 | labels:
5 | app.kubernetes.io/name: typesense-operator
6 | app.kubernetes.io/managed-by: kustomize
7 | name: c-bm-1
8 | spec:
9 | image: typesense/typesense:27.1
10 | replicas: 3
11 | storage:
12 | size: 100Mi
13 | storageClassName: iscsi
14 | ---
15 | apiVersion: ts.opentelekomcloud.com/v1alpha1
16 | kind: TypesenseCluster
17 | metadata:
18 | labels:
19 | app.kubernetes.io/name: typesense-operator
20 | app.kubernetes.io/managed-by: kustomize
21 | name: c-bm-2
22 | spec:
23 | image: typesense/typesense:27.1
24 | replicas: 1
25 | storage:
26 | size: 100Mi
27 | storageClassName: nfs
--------------------------------------------------------------------------------
/config/samples/ts_v1alpha1_typesensecluster_kind.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: typesense-local-path
5 | provisioner: rancher.io/local-path
6 | reclaimPolicy: Delete
7 | allowVolumeExpansion: true
8 | volumeBindingMode: WaitForFirstConsumer
9 | ---
10 | apiVersion: v1
11 | kind: Secret
12 | metadata:
13 | labels:
14 | app.kubernetes.io/name: typesense-operator
15 | app.kubernetes.io/managed-by: kustomize
16 | name: typesense-common-bootstrap-key
17 | type: Opaque
18 | data:
19 | typesense-api-key: SXdpVG9CcnFYTHZYeTJNMG1TS1hPaGt0dlFUY3VWUloxc1M5REtsRUNtMFFwQU93R1hoanVIVWJLQnE2ejdlSQ==
20 | ---
21 | apiVersion: v1
22 | kind: ConfigMap
23 | metadata:
24 | name: c-kind-1-server-configuration
25 | data:
26 | TYPESENSE_HEALTHY_READ_LAG: "1000"
27 | TYPESENSE_HEALTHY_WRITE_LAG: "500"
28 | ---
29 | apiVersion: ts.opentelekomcloud.com/v1alpha1
30 | kind: TypesenseCluster
31 | metadata:
32 | labels:
33 | app.kubernetes.io/name: typesense-operator
34 | app.kubernetes.io/managed-by: kustomize
35 | name: c-kind-1
36 | spec:
37 | image: typesense/typesense:29.0.rc30
38 | replicas: 5
39 | corsDomains: "http://localhost,https://www.example.de"
40 | storage:
41 | size: 150Mi
42 | storageClassName: typesense-local-path
43 | adminApiKey:
44 | name: typesense-common-bootstrap-key
45 | metrics:
46 | release: promstack
47 | additionalServerConfiguration:
48 | name: c-kind-1-server-configuration
49 | ---
50 | apiVersion: v1
51 | kind: ConfigMap
52 | metadata:
53 | name: c-kind-2-config
54 | data:
55 | TYPESENSE_ENABLE_SEARCH_ANALYTICS: "true"
56 | ---
57 | apiVersion: v1
58 | kind: Secret
59 | metadata:
60 | labels:
61 | app.kubernetes.io/name: typesense-operator
62 | app.kubernetes.io/managed-by: kustomize
63 | name: scraper-auth-docusaurus-example-com
64 | type: Opaque
65 | data:
66 | KC_URL: VGhpc0lzTm90QVNlY3VyZVBhc3N3b3Jk
67 | KC_REALM: VGhpc0lzTm90QVNlY3VyZVBhc3N3b3Jk
68 | KC_CLIENT_ID: VGhpc0lzTm90QVNlY3VyZVBhc3N3b3Jk
69 | KC_CLIENT_SECRET: VGhpc0lzTm90QVNlY3VyZVBhc3N3b3Jk
70 | ---
71 | apiVersion: ts.opentelekomcloud.com/v1alpha1
72 | kind: TypesenseCluster
73 | metadata:
74 | labels:
75 | app.kubernetes.io/name: typesense-operator
76 | app.kubernetes.io/managed-by: kustomize
77 | name: c-kind-2
78 | spec:
79 | image: typesense/typesense:28.0
80 | replicas: 7
81 | adminApiKey:
82 | name: typesense-common-bootstrap-key
83 | enableCors: true
84 | apiPort: 8108
85 | resources:
86 | limits:
87 | cpu: "200m"
88 | memory: "1024Mi"
89 | requests:
90 | cpu: "100m"
91 | memory: "64Mi"
92 | storage:
93 | size: 75Mi
94 | storageClassName: typesense-local-path
95 | metrics:
96 | release: promstack
97 | ingress:
98 | referer: referer.example.com
99 | host: host.example.com
100 | path: /
101 | pathType: ImplementationSpecific
102 | ingressClassName: nginx
103 | clusterIssuer: lets-encrypt-prod
104 | # readOnlyRootFilesystem:
105 | # volumes:
106 | # - name: nginx-var-cache
107 | # emptyDir: {}
108 | # - name: run
109 | # emptyDir: {}
110 | # volumeMounts:
111 | # - name: nginx-var-cache
112 | # mountPath: /var/cache/nginx
113 | # - name: run
114 | # mountPath: /run
115 | ---
116 | #apiVersion: ts.opentelekomcloud.com/v1alpha1
117 | #kind: TypesenseCluster
118 | #metadata:
119 | # labels:
120 | # app.kubernetes.io/name: typesense-operator
121 | # app.kubernetes.io/managed-by: kustomize
122 | # name: c-kind-3
123 | #spec:
124 | # image: typesense/typesense:27.1
125 | # replicas: 1
126 | # apiPort: 18108
127 | # peeringPort: 18107
128 | # enableCors: true
129 | # corsDomains: "http://localhost,https://www.example.de"
130 | # storage:
131 | # size: 10Mi
132 | # storageClassName: typesense-local-path
133 | # adminApiKey:
134 | # name: typesense-common-bootstrap-key
135 | # ingress:
136 | # host: www.example.de
137 | # ingressClassName: nginx
138 | # clusterIssuer: opentelekomcloud-letsencrypt-staging
139 | # resources:
140 | # limits:
141 | # cpu: "300m"
142 | # memory: "128Mi"
143 | # requests:
144 | # cpu: "100m"
145 | # memory: "32Mi"
146 | # scrapers:
147 | # - name: docusaurus-example-com
148 | # image: typesense/docsearch-scraper:0.11.0
149 | # authConfiguration:
150 | # name: scraper-auth-docusaurus-example-com
151 | #      config: "{\"index_name\":\"docusaurus-example\",\"start_urls\":[\"https://docusaurus.example.com/\"],\"sitemap_urls\":[\"https://docusaurus.example.com/sitemap.xml\"],\"sitemap_alternate_links\":true,\"stop_urls\":[\"/tests\"],\"selectors\":{\"lvl0\":{\"selector\":\"(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]\",\"type\":\"xpath\",\"global\":true,\"default_value\":\"Documentation\"},\"lvl1\":\"header h1\",\"lvl2\":\"article h2\",\"lvl3\":\"article h3\",\"lvl4\":\"article h4\",\"lvl5\":\"article h5, article td:first-child\",\"lvl6\":\"article h6\",\"text\":\"article p, article li, article td:last-child\"},\"strip_chars\":\" .,;:#\",\"custom_settings\":{\"separatorsToIndex\":\"_\",\"attributesForFaceting\":[\"language\",\"version\",\"type\",\"docusaurus_tag\"],\"attributesToRetrieve\":[\"hierarchy\",\"content\",\"anchor\",\"url\",\"url_without_anchor\",\"type\"]},\"conversation_id\":[\"833762294\"],\"nb_hits\":46250}"
152 | # schedule: '*/2 * * * *'
--------------------------------------------------------------------------------
/config/samples/ts_v1alpha1_typesensecluster_opentelekomcloud.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Secret
4 | metadata:
5 | labels:
6 | app.kubernetes.io/name: typesense-operator
7 | app.kubernetes.io/managed-by: kustomize
8 | name: typesense-common-bootstrap-key
9 | type: Opaque
10 | data:
11 | typesense-api-key: SXdpVG9CcnFYTHZYeTJNMG1TS1hPaGt0dlFUY3VWUloxc1M5REtsRUNtMFFwQU93R1hoanVIVWJLQnE2ejdlSQ==
12 | ---
13 | apiVersion: v1
14 | data:
15 | TYPESENSE_HEALTHY_READ_LAG: "1000"
16 | TYPESENSE_HEALTHY_WRITE_LAG: "500"
17 | kind: ConfigMap
18 | metadata:
19 | name: c-otc-1-server-configuration
20 | ---
21 | apiVersion: ts.opentelekomcloud.com/v1alpha1
22 | kind: TypesenseCluster
23 | metadata:
24 | labels:
25 | app.kubernetes.io/name: typesense-operator
26 | app.kubernetes.io/managed-by: kustomize
27 | name: c-otc-1
28 | spec:
29 | image: typesense/typesense:28.0
30 | replicas: 5
31 | resources:
32 | limits:
33 | cpu: "1800m"
34 | memory: "1024Mi"
35 | requests:
36 | cpu: "100m"
37 | memory: "256Mi"
38 | storage:
39 | size: 10Gi
40 | storageClassName: csi-disk
41 | # ingress:
42 | # host: ts.example.de
43 | # ingressClassName: nginx
44 | # clusterIssuer: opentelekomcloud-letsencrypt
45 | adminApiKey:
46 | name: typesense-common-bootstrap-key
47 | # scrapers:
48 | # - name: docusaurus-example-com
49 | # image: typesense/docsearch-scraper:0.11.0
50 | # config: "{\"index_name\":\"docusaurus-example\",\"start_urls\":[\"https://docusaurus.example.com/\"],\"sitemap_urls\":[\"https://docusaurus.example.com/sitemap.xml\"],\"sitemap_alternate_links\":true,\"stop_urls\":[\"/tests\"],\"selectors\":{\"lvl0\":{\"selector\":\"(//ul[contains(@class,'menu__list')]//a[contains(@class, 'menu__link menu__link--sublist menu__link--active')]/text() | //nav[contains(@class, 'navbar')]//a[contains(@class, 'navbar__link--active')]/text())[last()]\",\"type\":\"xpath\",\"global\":true,\"default_value\":\"Documentation\"},\"lvl1\":\"header h1\",\"lvl2\":\"article h2\",\"lvl3\":\"article h3\",\"lvl4\":\"article h4\",\"lvl5\":\"article h5, article td:first-child\",\"lvl6\":\"article h6\",\"text\":\"article p, article li, article td:last-child\"},\"strip_chars\":\" .,;:#\",\"custom_settings\":{\"separatorsToIndex\":\"_\",\"attributesForFaceting\":[\"language\",\"version\",\"type\",\"docusaurus_tag\"],\"attributesToRetrieve\":[\"hierarchy\",\"content\",\"anchor\",\"url\",\"url_without_anchor\",\"type\"]},\"conversation_id\":[\"833762294\"],\"nb_hits\":46250}"
51 | # schedule: '*/2 * * * *'
52 | additionalServerConfiguration:
53 | name: c-otc-1-server-configuration
54 | metrics:
55 | release: promstack
56 | ---
57 | apiVersion: ts.opentelekomcloud.com/v1alpha1
58 | kind: TypesenseCluster
59 | metadata:
60 | labels:
61 | app.kubernetes.io/name: typesense-operator
62 | app.kubernetes.io/managed-by: kustomize
63 | name: c-otc-2
64 | spec:
65 | image: typesense/typesense:28.0
66 | replicas: 1
67 | storage:
68 | storageClassName: csi-disk
69 | ---
70 | #apiVersion: ts.opentelekomcloud.com/v1alpha1
71 | #kind: TypesenseCluster
72 | #metadata:
73 | # labels:
74 | # app.kubernetes.io/name: typesense-operator
75 | # app.kubernetes.io/managed-by: kustomize
76 | # name: c-otc-3
77 | #spec:
78 | # image: typesense/typesense:26.0
79 | # replicas: 3
80 | # storage:
81 | # storageClassName: csi-obs
--------------------------------------------------------------------------------
/config/scorecard/bases/config.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: scorecard.operatorframework.io/v1alpha3
2 | kind: Configuration
3 | metadata:
4 | name: config
5 | stages:
6 | - parallel: true
7 | tests: []
8 |
--------------------------------------------------------------------------------
/config/scorecard/kustomization.yaml:
--------------------------------------------------------------------------------
1 | resources:
2 | - bases/config.yaml
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | patches:
6 | - path: patches/basic.config.yaml
7 | target:
8 | group: scorecard.operatorframework.io
9 | kind: Configuration
10 | name: config
11 | version: v1alpha3
12 | - path: patches/olm.config.yaml
13 | target:
14 | group: scorecard.operatorframework.io
15 | kind: Configuration
16 | name: config
17 | version: v1alpha3
18 | # +kubebuilder:scaffold:patches
19 |
--------------------------------------------------------------------------------
/config/scorecard/patches/basic.config.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /stages/0/tests/-
3 | value:
4 | entrypoint:
5 | - scorecard-test
6 | - basic-check-spec
7 | image: quay.io/operator-framework/scorecard-test:v1.38.0
8 | labels:
9 | suite: basic
10 | test: basic-check-spec-test
11 |
--------------------------------------------------------------------------------
/config/scorecard/patches/olm.config.yaml:
--------------------------------------------------------------------------------
1 | - op: add
2 | path: /stages/0/tests/-
3 | value:
4 | entrypoint:
5 | - scorecard-test
6 | - olm-bundle-validation
7 | image: quay.io/operator-framework/scorecard-test:v1.38.0
8 | labels:
9 | suite: olm
10 | test: olm-bundle-validation-test
11 | - op: add
12 | path: /stages/0/tests/-
13 | value:
14 | entrypoint:
15 | - scorecard-test
16 | - olm-crds-have-validation
17 | image: quay.io/operator-framework/scorecard-test:v1.38.0
18 | labels:
19 | suite: olm
20 | test: olm-crds-have-validation-test
21 | - op: add
22 | path: /stages/0/tests/-
23 | value:
24 | entrypoint:
25 | - scorecard-test
26 | - olm-crds-have-resources
27 | image: quay.io/operator-framework/scorecard-test:v1.38.0
28 | labels:
29 | suite: olm
30 | test: olm-crds-have-resources-test
31 | - op: add
32 | path: /stages/0/tests/-
33 | value:
34 | entrypoint:
35 | - scorecard-test
36 | - olm-spec-descriptors
37 | image: quay.io/operator-framework/scorecard-test:v1.38.0
38 | labels:
39 | suite: olm
40 | test: olm-spec-descriptors-test
41 | - op: add
42 | path: /stages/0/tests/-
43 | value:
44 | entrypoint:
45 | - scorecard-test
46 | - olm-status-descriptors
47 | image: quay.io/operator-framework/scorecard-test:v1.38.0
48 | labels:
49 | suite: olm
50 | test: olm-status-descriptors-test
51 |
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/akyriako/typesense-operator
2 |
3 | go 1.22.0
4 |
5 | require (
6 | github.com/go-logr/logr v1.4.1
7 | github.com/mitchellh/hashstructure/v2 v2.0.2
8 | github.com/onsi/ginkgo/v2 v2.17.1
9 | github.com/onsi/gomega v1.32.0
10 | github.com/pkg/errors v0.9.1
11 | github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.71.0
12 | go.uber.org/zap v1.26.0
13 | golang.org/x/text v0.15.0
14 | k8s.io/api v0.30.1
15 | k8s.io/apimachinery v0.30.1
16 | k8s.io/client-go v0.30.1
17 | k8s.io/utils v0.0.0-20231127182322-b307cd553661
18 | sigs.k8s.io/controller-runtime v0.18.4
19 | )
20 |
21 | require (
22 | github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect
23 | github.com/beorn7/perks v1.0.1 // indirect
24 | github.com/blang/semver/v4 v4.0.0 // indirect
25 | github.com/cenkalti/backoff/v4 v4.2.1 // indirect
26 | github.com/cespare/xxhash/v2 v2.2.0 // indirect
27 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
28 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect
29 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect
30 | github.com/felixge/httpsnoop v1.0.3 // indirect
31 | github.com/fsnotify/fsnotify v1.7.0 // indirect
32 | github.com/go-logr/stdr v1.2.2 // indirect
33 | github.com/go-logr/zapr v1.3.0 // indirect
34 | github.com/go-openapi/jsonpointer v0.20.2 // indirect
35 | github.com/go-openapi/jsonreference v0.20.2 // indirect
36 | github.com/go-openapi/swag v0.22.8 // indirect
37 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
38 | github.com/gogo/protobuf v1.3.2 // indirect
39 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
40 | github.com/golang/protobuf v1.5.4 // indirect
41 | github.com/google/cel-go v0.17.8 // indirect
42 | github.com/google/gnostic-models v0.6.8 // indirect
43 | github.com/google/go-cmp v0.6.0 // indirect
44 | github.com/google/gofuzz v1.2.0 // indirect
45 | github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
46 | github.com/google/uuid v1.5.0 // indirect
47 | github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect
48 | github.com/imdario/mergo v0.3.6 // indirect
49 | github.com/josharian/intern v1.0.0 // indirect
50 | github.com/json-iterator/go v1.1.12 // indirect
51 | github.com/mailru/easyjson v0.7.7 // indirect
52 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
53 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
54 | github.com/modern-go/reflect2 v1.0.2 // indirect
55 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
56 | github.com/prometheus/client_golang v1.16.0 // indirect
57 | github.com/prometheus/client_model v0.4.0 // indirect
58 | github.com/prometheus/common v0.44.0 // indirect
59 | github.com/prometheus/procfs v0.12.0 // indirect
60 | github.com/spf13/pflag v1.0.5 // indirect
61 | github.com/stoewer/go-strcase v1.2.0 // indirect
62 | github.com/stretchr/testify v1.9.0 // indirect
63 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.44.0 // indirect
64 | go.opentelemetry.io/otel v1.19.0 // indirect
65 | go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 // indirect
66 | go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0 // indirect
67 | go.opentelemetry.io/otel/metric v1.19.0 // indirect
68 | go.opentelemetry.io/otel/sdk v1.19.0 // indirect
69 | go.opentelemetry.io/otel/trace v1.19.0 // indirect
70 | go.opentelemetry.io/proto/otlp v1.0.0 // indirect
71 | go.uber.org/multierr v1.11.0 // indirect
72 | golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect
73 | golang.org/x/net v0.25.0 // indirect
74 | golang.org/x/oauth2 v0.12.0 // indirect
75 | golang.org/x/sync v0.7.0 // indirect
76 | golang.org/x/sys v0.20.0 // indirect
77 | golang.org/x/term v0.20.0 // indirect
78 | golang.org/x/time v0.5.0 // indirect
79 | golang.org/x/tools v0.21.0 // indirect
80 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
81 | google.golang.org/appengine v1.6.7 // indirect
82 | google.golang.org/genproto/googleapis/api v0.0.0-20230726155614-23370e0ffb3e // indirect
83 | google.golang.org/genproto/googleapis/rpc v0.0.0-20230822172742-b8732ec3820d // indirect
84 | google.golang.org/grpc v1.58.3 // indirect
85 | google.golang.org/protobuf v1.33.0 // indirect
86 | gopkg.in/inf.v0 v0.9.1 // indirect
87 | gopkg.in/yaml.v2 v2.4.0 // indirect
88 | gopkg.in/yaml.v3 v3.0.1 // indirect
89 | k8s.io/apiextensions-apiserver v0.30.1 // indirect
90 | k8s.io/apiserver v0.30.1 // indirect
91 | k8s.io/component-base v0.30.1 // indirect
92 | k8s.io/klog/v2 v2.120.1 // indirect
93 | k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
94 | sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.29.0 // indirect
95 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
96 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
97 | sigs.k8s.io/yaml v1.4.0 // indirect
98 | )
99 |
--------------------------------------------------------------------------------
/hack/boilerplate.go.txt:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
--------------------------------------------------------------------------------
/internal/controller/suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controller
18 |
19 | import (
20 | "fmt"
21 | "path/filepath"
22 | "runtime"
23 | "testing"
24 |
25 | . "github.com/onsi/ginkgo/v2"
26 | . "github.com/onsi/gomega"
27 |
28 | "k8s.io/client-go/kubernetes/scheme"
29 | "k8s.io/client-go/rest"
30 | "sigs.k8s.io/controller-runtime/pkg/client"
31 | "sigs.k8s.io/controller-runtime/pkg/envtest"
32 | logf "sigs.k8s.io/controller-runtime/pkg/log"
33 | "sigs.k8s.io/controller-runtime/pkg/log/zap"
34 |
35 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
36 | // +kubebuilder:scaffold:imports
37 | )
38 |
39 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to
40 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
41 |
42 | var cfg *rest.Config
43 | var k8sClient client.Client
44 | var testEnv *envtest.Environment
45 |
// TestControllers is the standard Go test entry point for this package; it
// wires Gomega failures into Ginkgo and runs every registered spec.
func TestControllers(t *testing.T) {
	RegisterFailHandler(Fail)

	RunSpecs(t, "Controller Suite")
}
51 |
// BeforeSuite boots a local envtest control plane (etcd + kube-apiserver),
// installs this project's CRDs, registers the TypesenseCluster types into the
// global scheme and creates the shared k8sClient used by all specs.
var _ = BeforeSuite(func() {
	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))

	By("bootstrapping test environment")
	testEnv = &envtest.Environment{
		CRDDirectoryPaths:     []string{filepath.Join("..", "..", "config", "crd", "bases")},
		ErrorIfCRDPathMissing: true,

		// The BinaryAssetsDirectory is only required if you want to run the tests directly
		// without calling the makefile target test. If not informed it will look for the
		// default path defined in controller-runtime which is /usr/local/kubebuilder/.
		// Note that you must have the required binaries setup under the bin directory to perform
		// the tests directly. When we run make test it will be setup and used automatically.
		BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s",
			fmt.Sprintf("1.30.0-%s-%s", runtime.GOOS, runtime.GOARCH)),
	}

	var err error
	// cfg is defined in this file globally.
	cfg, err = testEnv.Start()
	Expect(err).NotTo(HaveOccurred())
	Expect(cfg).NotTo(BeNil())

	err = tsv1alpha1.AddToScheme(scheme.Scheme)
	Expect(err).NotTo(HaveOccurred())

	// +kubebuilder:scaffold:scheme

	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
	Expect(err).NotTo(HaveOccurred())
	Expect(k8sClient).NotTo(BeNil())

})
85 |
// AfterSuite tears down the envtest control plane started in BeforeSuite.
var _ = AfterSuite(func() {
	By("tearing down the test environment")
	err := testEnv.Stop()
	Expect(err).NotTo(HaveOccurred())
})
91 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_condition_types.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
6 | "k8s.io/apimachinery/pkg/api/meta"
7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | )
9 |
// ConditionQuorum is a typed condition reason describing the health of the
// cluster's raft quorum, as evaluated by the quorum reconciliation.
type ConditionQuorum string

// Definitions to manage status conditions.
const (
	// ConditionTypeReady is the single condition type this operator maintains.
	ConditionTypeReady = "Ready"

	// Reasons set while a dependent object is not yet reconciled.
	ConditionReasonReconciliationInProgress = "ReconciliationInProgress"
	ConditionReasonSecretNotReady           = "SecretNotReady"
	ConditionReasonConfigMapNotReady        = "ConfigMapNotReady"
	ConditionReasonServicesNotReady         = "ServicesNotReady"
	ConditionReasonIngressNotReady          = "IngressNotReady"
	ConditionReasonScrapersNotReady         = "ScrapersNotReady"
	ConditionReasonMetricsExporterNotReady  = "MetricsExporterNotReady"
	// Quorum-health reasons; the "QuorumNeedsAttention*" values signal states
	// that require manual administrative intervention.
	ConditionReasonQuorumStateUnknown                    ConditionQuorum = "QuorumStateUnknown"
	ConditionReasonQuorumReady                           ConditionQuorum = "QuorumReady"
	ConditionReasonQuorumNotReady                        ConditionQuorum = "QuorumNotReady"
	ConditionReasonQuorumNotReadyWaitATerm               ConditionQuorum = "QuorumNotReadyWaitATerm"
	ConditionReasonQuorumDowngraded                      ConditionQuorum = "QuorumDowngraded"
	ConditionReasonQuorumUpgraded                        ConditionQuorum = "QuorumUpgraded"
	ConditionReasonQuorumNeedsAttentionMemoryOrDiskIssue ConditionQuorum = "QuorumNeedsAttentionMemoryOrDiskIssue"
	ConditionReasonQuorumNeedsAttentionClusterIsLagging  ConditionQuorum = "QuorumNeedsAttentionClusterIsLagging"
	ConditionReasonQuorumQueuedWrites                    ConditionQuorum = "QuorumQueuedWrites"
	ConditionReasonStatefulSetNotReady                   = "StatefulSetNotReady"

	// Messages reused across status updates.
	InitReconciliationMessage = "Starting reconciliation"
	UpdateStatusMessageFailed = "failed to update typesense cluster status"
)
37 |
38 | func (r *TypesenseClusterReconciler) initConditions(ctx context.Context, ts *tsv1alpha1.TypesenseCluster) error {
39 | if ts.Status.Conditions == nil || len(ts.Status.Conditions) == 0 {
40 | if err := r.patchStatus(ctx, ts, func(status *tsv1alpha1.TypesenseClusterStatus) {
41 | meta.SetStatusCondition(&ts.Status.Conditions, metav1.Condition{Type: ConditionTypeReady, Status: metav1.ConditionUnknown, Reason: ConditionReasonReconciliationInProgress, Message: InitReconciliationMessage})
42 | status.Phase = "Bootstrapping"
43 | }); err != nil {
44 | r.logger.Error(err, UpdateStatusMessageFailed)
45 | return err
46 | }
47 | }
48 | return nil
49 | }
50 |
51 | func (r *TypesenseClusterReconciler) setConditionNotReady(ctx context.Context, ts *tsv1alpha1.TypesenseCluster, reason string, err error) error {
52 | if err := r.patchStatus(ctx, ts, func(status *tsv1alpha1.TypesenseClusterStatus) {
53 | meta.SetStatusCondition(&ts.Status.Conditions, metav1.Condition{Type: ConditionTypeReady, Status: metav1.ConditionFalse, Reason: reason, Message: err.Error()})
54 | status.Phase = reason
55 | }); err != nil {
56 | return err
57 | }
58 | return nil
59 | }
60 |
61 | func (r *TypesenseClusterReconciler) setConditionReady(ctx context.Context, ts *tsv1alpha1.TypesenseCluster, reason string) error {
62 | if err := r.patchStatus(ctx, ts, func(status *tsv1alpha1.TypesenseClusterStatus) {
63 | meta.SetStatusCondition(&ts.Status.Conditions, metav1.Condition{Type: ConditionTypeReady, Status: metav1.ConditionTrue, Reason: reason, Message: "Cluster is Ready"})
64 | status.Phase = reason
65 | }); err != nil {
66 | return err
67 | }
68 | return nil
69 | }
70 |
71 | func (r *TypesenseClusterReconciler) getConditionReady(ts *tsv1alpha1.TypesenseCluster) *metav1.Condition {
72 | condition := ts.Status.Conditions[0]
73 | if condition.Type != ConditionTypeReady {
74 | return nil
75 | }
76 |
77 | return &condition
78 | }
79 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_configmap.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
7 | appsv1 "k8s.io/api/apps/v1"
8 | v1 "k8s.io/api/core/v1"
9 | discoveryv1 "k8s.io/api/discovery/v1"
10 | apierrors "k8s.io/apimachinery/pkg/api/errors"
11 | "k8s.io/apimachinery/pkg/labels"
12 | "k8s.io/utils/ptr"
13 | ctrl "sigs.k8s.io/controller-runtime"
14 | "sigs.k8s.io/controller-runtime/pkg/client"
15 | "strings"
16 | )
17 |
18 | func (r *TypesenseClusterReconciler) ReconcileConfigMap(ctx context.Context, ts tsv1alpha1.TypesenseCluster) (updated *bool, err error) {
19 | r.logger.V(debugLevel).Info("reconciling config map")
20 |
21 | configMapName := fmt.Sprintf(ClusterNodesConfigMap, ts.Name)
22 | configMapExists := true
23 | configMapObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: configMapName}
24 |
25 | var cm = &v1.ConfigMap{}
26 | if err = r.Get(ctx, configMapObjectKey, cm); err != nil {
27 | if apierrors.IsNotFound(err) {
28 | configMapExists = false
29 | } else {
30 | r.logger.Error(err, fmt.Sprintf("unable to fetch config map: %s", configMapName))
31 | return ptr.To[bool](false), err
32 | }
33 | }
34 |
35 | if !configMapExists {
36 | r.logger.V(debugLevel).Info("creating config map", "configmap", configMapObjectKey.Name)
37 |
38 | cm, err = r.createConfigMap(ctx, configMapObjectKey, &ts)
39 | if err != nil {
40 | r.logger.Error(err, "creating config map failed", "configmap", configMapObjectKey.Name)
41 | return nil, err
42 | }
43 | } else {
44 | r.logger.V(debugLevel).Info("updating config map", "configmap", configMapObjectKey.Name)
45 |
46 | cm, _, err = r.updateConfigMap(ctx, &ts, cm, nil)
47 | if err != nil {
48 | return nil, err
49 | }
50 | }
51 |
52 | return &configMapExists, nil
53 | }
54 |
55 | const nodeNameLenLimit = 64
56 |
57 | func (r *TypesenseClusterReconciler) createConfigMap(ctx context.Context, key client.ObjectKey, ts *tsv1alpha1.TypesenseCluster) (*v1.ConfigMap, error) {
58 | nodes, err := r.getNodes(ctx, ts, ts.Spec.Replicas, true)
59 | if err != nil {
60 | return nil, err
61 | }
62 |
63 | cm := &v1.ConfigMap{
64 | ObjectMeta: getObjectMeta(ts, &key.Name, nil),
65 | Data: map[string]string{
66 | "nodes": strings.Join(nodes, ","),
67 | "fallback": strings.Join(nodes, ","),
68 | },
69 | }
70 |
71 | err = ctrl.SetControllerReference(ts, cm, r.Scheme)
72 | if err != nil {
73 | return nil, err
74 | }
75 |
76 | err = r.Create(ctx, cm)
77 | if err != nil {
78 | return nil, err
79 | }
80 |
81 | return cm, nil
82 | }
83 |
84 | func (r *TypesenseClusterReconciler) updateConfigMap(ctx context.Context, ts *tsv1alpha1.TypesenseCluster, cm *v1.ConfigMap, replicas *int32) (*v1.ConfigMap, int, error) {
85 | stsName := fmt.Sprintf(ClusterStatefulSet, ts.Name)
86 | stsObjectKey := client.ObjectKey{
87 | Name: stsName,
88 | Namespace: ts.Namespace,
89 | }
90 |
91 | var sts = &appsv1.StatefulSet{}
92 | if err := r.Get(ctx, stsObjectKey, sts); err != nil {
93 | if apierrors.IsNotFound(err) {
94 | err := r.deleteConfigMap(ctx, cm)
95 | if err != nil {
96 | return nil, 0, err
97 | }
98 | } else {
99 | r.logger.Error(err, fmt.Sprintf("unable to fetch statefulset: %s", stsName))
100 | }
101 |
102 | return nil, 0, err
103 | }
104 |
105 | if replicas == nil {
106 | replicas = sts.Spec.Replicas
107 | }
108 |
109 | nodes, err := r.getNodes(ctx, ts, *replicas, false)
110 | fallback, err := r.getNodes(ctx, ts, *replicas, true)
111 | if err != nil {
112 | return nil, 0, err
113 | }
114 |
115 | availableNodes := len(nodes)
116 | if availableNodes == 0 {
117 | r.logger.V(debugLevel).Info("empty quorum configuration")
118 | return nil, 0, fmt.Errorf("empty quorum configuration")
119 | }
120 |
121 | desired := cm.DeepCopy()
122 | desired.Data = map[string]string{
123 | "nodes": strings.Join(nodes, ","),
124 | "fallback": strings.Join(fallback, ","),
125 | }
126 |
127 | r.logger.V(debugLevel).Info("current quorum configuration", "size", availableNodes, "nodes", nodes)
128 |
129 | if cm.Data["nodes"] != desired.Data["nodes"] || cm.Data["fallback"] != desired.Data["fallback"] {
130 | r.logger.Info("updating quorum configuration", "size", availableNodes, "nodes", nodes)
131 |
132 | err := r.Update(ctx, desired)
133 | if err != nil {
134 | r.logger.Error(err, "updating quorum configuration failed")
135 | return nil, 0, err
136 | }
137 | }
138 |
139 | return desired, availableNodes, nil
140 | }
141 |
142 | func (r *TypesenseClusterReconciler) deleteConfigMap(ctx context.Context, cm *v1.ConfigMap) error {
143 | err := r.Delete(ctx, cm)
144 | if err != nil {
145 | return err
146 | }
147 |
148 | return nil
149 | }
150 |
151 | func (r *TypesenseClusterReconciler) getNodes(ctx context.Context, ts *tsv1alpha1.TypesenseCluster, replicas int32, bootstrapping bool) ([]string, error) {
152 | nodes := make([]string, 0)
153 |
154 | if bootstrapping {
155 | for i := 0; i < int(replicas); i++ {
156 | nodeName := fmt.Sprintf("%s-sts-%d.%s-sts-svc", ts.Name, i, ts.Name)
157 | if len(nodeName) > nodeNameLenLimit {
158 | return nil, fmt.Errorf("raft error: node name should not exceed %d characters: %s", nodeNameLenLimit, nodeName)
159 | }
160 |
161 | nodes = append(nodes, fmt.Sprintf("%s:%d:%d", nodeName, ts.Spec.PeeringPort, ts.Spec.ApiPort))
162 | }
163 |
164 | return nodes, nil
165 | }
166 |
167 | stsName := fmt.Sprintf(ClusterStatefulSet, ts.Name)
168 | stsObjectKey := client.ObjectKey{
169 | Name: stsName,
170 | Namespace: ts.Namespace,
171 | }
172 | sts, err := r.GetFreshStatefulSet(ctx, stsObjectKey)
173 | if err != nil {
174 | return nil, err
175 | }
176 |
177 | slices, err := r.getEndpointSlicesForStatefulSet(ctx, sts)
178 | if err != nil {
179 | return nil, err
180 | }
181 |
182 | i := 0
183 | for _, s := range slices {
184 | for _, e := range s.Endpoints {
185 | addr := e.Addresses[0]
186 | //r.logger.V(debugLevel).Info("discovered slice endpoint", "slice", s.Name, "endpoint", e.Hostname, "address", addr)
187 | nodes = append(nodes, fmt.Sprintf("%s:%d:%d", addr, ts.Spec.PeeringPort, ts.Spec.ApiPort))
188 |
189 | i++
190 | }
191 | }
192 |
193 | return nodes, nil
194 | }
195 |
// getEndpointSlicesForStatefulSet returns the EndpointSlices of the
// StatefulSet's headless Service, filtered down to slices that still contain
// at least one endpoint backed by a live pod of that StatefulSet. This guards
// against stale slices that only reference already-terminated pods.
func (r *TypesenseClusterReconciler) getEndpointSlicesForStatefulSet(ctx context.Context, sts *appsv1.StatefulSet) ([]discoveryv1.EndpointSlice, error) {
	r.logger.V(debugLevel).Info("collecting endpoint slices")
	svcName := sts.Spec.ServiceName
	namespace := sts.Namespace

	// 1) List EndpointSlices for the headless Service, matched by the
	//    well-known "kubernetes.io/service-name" label.
	var sliceList discoveryv1.EndpointSliceList
	if err := r.Client.List(ctx, &sliceList,
		client.InNamespace(namespace),
		client.MatchingLabels{discoveryv1.LabelServiceName: svcName},
	); err != nil {
		return nil, err
	}

	// 2) Build a set of "live" Pod IPs for this StatefulSet: pods matching
	//    the StatefulSet selector that are Running or Pending, not being
	//    deleted, and already have an IP assigned.
	selector := labels.SelectorFromSet(sts.Spec.Selector.MatchLabels)
	var podList v1.PodList
	if err := r.Client.List(ctx, &podList,
		client.InNamespace(namespace),
		client.MatchingLabelsSelector{Selector: selector},
	); err != nil {
		return nil, err
	}
	liveIPs := map[string]struct{}{}
	for _, pod := range podList.Items {
		if pod.DeletionTimestamp == nil && (pod.Status.Phase == v1.PodRunning || pod.Status.Phase == v1.PodPending) && pod.Status.PodIP != "" {
			liveIPs[pod.Status.PodIP] = struct{}{}
		}
	}

	// 3) Filter slices: keep only slices that contain at least one endpoint
	//    whose IP is still in liveIPs.
	var readySlices []discoveryv1.EndpointSlice
	for _, slice := range sliceList.Items {
		keep := false
		for _, ep := range slice.Endpoints {
			// Only consider endpoints that reference a Pod and whose first
			// address is still live.
			if ep.TargetRef != nil &&
				ep.TargetRef.Kind == "Pod" &&
				len(ep.Addresses) > 0 {
				ip := ep.Addresses[0]
				if _, ok := liveIPs[ip]; ok {
					keep = true
					break
				}
			}
		}
		if keep {
			readySlices = append(readySlices, slice)
		}
	}

	return readySlices, nil
}
250 |
251 | func (r *TypesenseClusterReconciler) getNodeEndpoint(ts *tsv1alpha1.TypesenseCluster, raftNodeEndpoint string) string {
252 | if hasIP4Prefix(raftNodeEndpoint) {
253 | node := strings.Replace(raftNodeEndpoint, fmt.Sprintf(":%d:%d", ts.Spec.PeeringPort, ts.Spec.ApiPort), "", 1)
254 | return node
255 | }
256 |
257 | node := strings.Replace(raftNodeEndpoint, fmt.Sprintf(":%d:%d", ts.Spec.PeeringPort, ts.Spec.ApiPort), "", 1)
258 | fqdn := fmt.Sprintf("%s.%s-sts-svc.%s.svc.cluster.local", node, ts.Name, ts.Namespace)
259 |
260 | return fqdn
261 | }
262 |
263 | func (r *TypesenseClusterReconciler) getShortName(raftNodeEndpoint string) string {
264 | parts := strings.SplitN(raftNodeEndpoint, ":", 2)
265 | host := parts[0]
266 |
267 | if hasIP4Prefix(host) {
268 | return host
269 | }
270 |
271 | if idx := strings.Index(host, "."); idx != -1 {
272 | return host[:idx]
273 | }
274 |
275 | return host
276 | }
277 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_constants.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
// Naming templates for the Kubernetes objects the operator manages; every
// "%s" placeholder is substituted with the TypesenseCluster resource name.
const (
	ClusterNodesConfigMap           = "%s-nodeslist"
	ClusterAdminApiKeySecret        = "%s-admin-key"
	ClusterAdminApiKeySecretKeyName = "typesense-api-key"

	ClusterHeadlessService = "%s-sts-svc"
	ClusterRestService     = "%s-svc"
	ClusterStatefulSet     = "%s-sts"
	ClusterAppLabel        = "%s-sts"

	ClusterReverseProxyAppLabel  = "%s-rp"
	ClusterReverseProxyIngress   = "%s-reverse-proxy"
	ClusterReverseProxyConfigMap = "%s-reverse-proxy-config"
	ClusterReverseProxy          = "%s-reverse-proxy"
	ClusterReverseProxyService   = "%s-reverse-proxy-svc"

	// TODO: remove the Prometheus-exporter names in a future version (0.2.15);
	// they are superseded by the PodMonitor-based metrics below.

	ClusterPrometheusExporterAppLabel       = "%s-prometheus-exporter"
	ClusterPrometheusExporterDeployment     = "%s-prometheus-exporter"
	ClusterPrometheusExporterService        = "%s-prometheus-exporter-svc"
	ClusterPrometheusExporterServiceMonitor = "%s-prometheus-exporter-servicemonitor"

	ClusterMetricsPodMonitorAppLabel = "%s-sts"
	ClusterMetricsPodMonitor         = "%s-podmonitor"

	ClusterScraperCronJob          = "%s-scraper"
	ClusterScraperCronJobContainer = "%s-docsearch-scraper"
)
32 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_controller.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controller
18 |
19 | import (
20 | "context"
21 | "fmt"
22 | "github.com/go-logr/logr"
23 | "github.com/pkg/errors"
24 | "golang.org/x/text/cases"
25 | "golang.org/x/text/language"
26 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
27 | "k8s.io/apimachinery/pkg/runtime"
28 | "k8s.io/client-go/discovery"
29 | "k8s.io/client-go/tools/record"
30 | ctrl "sigs.k8s.io/controller-runtime"
31 | "sigs.k8s.io/controller-runtime/pkg/builder"
32 | "sigs.k8s.io/controller-runtime/pkg/client"
33 | "sigs.k8s.io/controller-runtime/pkg/event"
34 | "sigs.k8s.io/controller-runtime/pkg/log"
35 | "sigs.k8s.io/controller-runtime/pkg/predicate"
36 | "strings"
37 | "time"
38 |
39 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
40 | )
41 |
// TypesenseClusterReconciler reconciles a TypesenseCluster object.
type TypesenseClusterReconciler struct {
	client.Client
	Scheme *runtime.Scheme
	// logger is (re)assigned per reconcile request with namespace/cluster values.
	logger          logr.Logger
	Recorder        record.EventRecorder
	DiscoveryClient *discovery.DiscoveryClient
}

// TypesenseClusterReconciliationPhase pairs a named reconciliation step with
// the function that executes it.
type TypesenseClusterReconciliationPhase struct {
	Name      string
	Reconcile func(context.Context, *tsv1alpha1.TypesenseCluster) (ctrl.Result, error)
}

var (
	// eventFilters limits which watch events trigger a reconcile.
	eventFilters = builder.WithPredicates(predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			// We only need to check generation changes here, because it is only
			// updated on spec changes. On the other hand RevisionVersion
			// changes also on status changes. We want to omit reconciliation
			// for status updates.
			return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
		},
		DeleteFunc: func(e event.DeleteEvent) bool {
			// DeleteStateUnknown evaluates to false only if the object
			// has been confirmed as deleted by the api server.
			return !e.DeleteStateUnknown
		},
	})

	// requeueAfter is the delay before the next scheduled reconciliation.
	// NOTE(review): this is package-level state that Reconcile overwrites on
	// every pass; concurrent reconciles of different clusters would race on
	// it — confirm whether a per-reconcile local variable was intended.
	requeueAfter = time.Second * 30
)
74 |
75 | // +kubebuilder:rbac:groups=ts.opentelekomcloud.com,resources=typesenseclusters,verbs=get;list;watch;create;update;patch;delete
76 | // +kubebuilder:rbac:groups=ts.opentelekomcloud.com,resources=typesenseclusters/status,verbs=get;update;patch
77 | // +kubebuilder:rbac:groups=ts.opentelekomcloud.com,resources=typesenseclusters/finalizers,verbs=update
78 | // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete
79 | // +kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete
80 | // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete
81 | // +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;delete;update;patch
82 | // +kubebuilder:rbac:groups="",resources=pods/status,verbs=get;update;patch
83 | // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete
84 | // +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
85 | // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
86 | // +kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses,verbs=get;list;watch;create;update;patch;delete
87 | // +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
88 | // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=servicemonitors,verbs=get;list;watch;create;update;patch;delete
89 | // +kubebuilder:rbac:groups=monitoring.coreos.com,resources=podmonitors,verbs=get;list;watch;create;update;patch;delete
90 | // +kubebuilder:rbac:groups=discovery.k8s.io,resources=endpointslices,verbs=get;list;watch
91 |
// Reconcile drives one pass of the TypesenseCluster control loop: it fetches
// the resource, seeds its status conditions, reconciles every dependent
// object (admin secret, node-list configmap, services, ingress, scraper
// cronjobs, pod monitor, statefulset) and finally — once past bootstrapping —
// evaluates raft quorum health, surfacing the outcome through the Ready
// status condition and Kubernetes events.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.18.4/pkg/reconcile
func (r *TypesenseClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	r.logger = log.Log.WithValues("namespace", req.Namespace, "cluster", req.Name)
	r.logger.Info("reconciling cluster")

	var ts tsv1alpha1.TypesenseCluster
	if err := r.Get(ctx, req.NamespacedName, &ts); err != nil {
		// The resource may have been deleted in the meantime; nothing to do.
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	err := r.initConditions(ctx, &ts)
	if err != nil {
		return ctrl.Result{}, err
	}

	// Update strategy: Admin Secret is Immutable, will not be updated on any future change
	secret, err := r.ReconcileSecret(ctx, ts)
	if err != nil {
		cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonSecretNotReady, err)
		if cerr != nil {
			err = errors.Wrap(err, cerr.Error())
		}
		return ctrl.Result{}, err
	}

	// Update strategy: Update the existing object, if changes are identified in the desired.Data["nodes"]
	updated, err := r.ReconcileConfigMap(ctx, ts)
	if err != nil {
		cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonConfigMapNotReady, err)
		if cerr != nil {
			err = errors.Wrap(err, cerr.Error())
		}
		return ctrl.Result{}, err
	}

	// Update strategy: Update the existing objects, if changes are identified in api and peering ports
	err = r.ReconcileServices(ctx, ts)
	if err != nil {
		cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonServicesNotReady, err)
		if cerr != nil {
			err = errors.Wrap(err, cerr.Error())
		}
		return ctrl.Result{}, err
	}

	// Update strategy: Update the existing objects, if changes are identified in api and peering ports
	err = r.ReconcileIngress(ctx, ts)
	if err != nil {
		cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonIngressNotReady, err)
		if cerr != nil {
			err = errors.Wrap(err, cerr.Error())
		}
		return ctrl.Result{}, err
	}

	// Update strategy: Drop the existing objects and recreate them, if changes are identified
	err = r.ReconcileScraper(ctx, ts)
	if err != nil {
		cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonScrapersNotReady, err)
		if cerr != nil {
			err = errors.Wrap(err, cerr.Error())
		}
		return ctrl.Result{}, err
	}

	// Update strategy: Update the Deployment if image changed. Drop the existing ServiceMonitor and recreate it, if changes are identified
	err = r.ReconcilePodMonitor(ctx, ts)
	if err != nil {
		cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonMetricsExporterNotReady, err)
		if cerr != nil {
			err = errors.Wrap(err, cerr.Error())
		}
		return ctrl.Result{}, err
	}

	// Update strategy: Update the whole specs when changes are identified
	sts, err := r.ReconcileStatefulSet(ctx, &ts)
	if err != nil {
		cerr := r.setConditionNotReady(ctx, &ts, ConditionReasonStatefulSetNotReady, err)
		if cerr != nil {
			err = errors.Wrap(err, cerr.Error())
		}
		return ctrl.Result{}, err
	}

	terminationGracePeriodSeconds := *sts.Spec.Template.Spec.TerminationGracePeriodSeconds
	// toTitle title-cases event messages without lowercasing the remainder.
	toTitle := func(s string) string {
		return cases.Title(language.Und, cases.NoLower).String(s)
	}

	// Quorum health is only evaluated once the configmap pre-existed
	// (*updated == true), i.e. after the initial bootstrapping pass.
	cond := ConditionReasonQuorumStateUnknown
	if *updated {
		condition, _, err := r.ReconcileQuorum(ctx, &ts, secret, client.ObjectKeyFromObject(sts))
		if err != nil {
			r.logger.Error(err, "reconciling quorum health failed")
		}

		if strings.Contains(string(condition), "QuorumNeedsAttention") {
			// States that cannot be self-healed: surface them via condition + event.
			eram := "cluster needs manual administrative attention: "

			if condition == ConditionReasonQuorumNeedsAttentionClusterIsLagging {
				eram += "queued_writes > healthyWriteLagThreshold"
			}

			if condition == ConditionReasonQuorumNeedsAttentionMemoryOrDiskIssue {
				eram += "out of memory or disk"
			}

			erram := errors.New(eram)
			cerr := r.setConditionNotReady(ctx, &ts, string(condition), erram)
			if cerr != nil {
				return ctrl.Result{}, cerr
			}
			r.Recorder.Eventf(&ts, "Warning", string(condition), toTitle(erram.Error()))

		} else {
			if condition != ConditionReasonQuorumReady {
				if err == nil {
					err = errors.New("quorum is not ready")
				}
				cerr := r.setConditionNotReady(ctx, &ts, string(condition), err)
				if cerr != nil {
					return ctrl.Result{}, cerr
				}

				r.Recorder.Eventf(&ts, "Warning", string(condition), toTitle(err.Error()))
			} else {
				// Emit the "ready" event only on the False/Unknown -> True transition.
				report := ts.Status.Conditions[0].Status != metav1.ConditionTrue

				cerr := r.setConditionReady(ctx, &ts, string(condition))
				if cerr != nil {
					return ctrl.Result{}, cerr
				}

				if report {
					r.Recorder.Eventf(&ts, "Normal", string(condition), toTitle("quorum is ready"))
				}
			}
		}
		cond = condition
	}

	lastAction := "bootstrapping"
	if *updated {
		lastAction = "reconciling"
	}
	// NOTE(review): this mutates the package-level requeueAfter on every
	// reconcile; with concurrent reconciles of different clusters this is a
	// data race and it permanently replaces the 30s default — confirm whether
	// a local variable was intended.
	requeueAfter = time.Duration(60+terminationGracePeriodSeconds) * time.Second
	r.logger.Info(fmt.Sprintf("%s cluster completed", lastAction), "condition", cond, "requeueAfter", requeueAfter)

	return ctrl.Result{RequeueAfter: requeueAfter}, nil
}
252 |
// SetupWithManager sets up the controller with the Manager, watching
// TypesenseCluster objects filtered through eventFilters (spec-generation
// updates and confirmed deletions only).
func (r *TypesenseClusterReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&tsv1alpha1.TypesenseCluster{}, eventFilters).
		Complete(r)
}
259 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_controller_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package controller
18 |
19 | import (
20 | "context"
21 |
22 | . "github.com/onsi/ginkgo/v2"
23 | . "github.com/onsi/gomega"
24 | "k8s.io/apimachinery/pkg/api/errors"
25 | "k8s.io/apimachinery/pkg/types"
26 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
27 |
28 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
29 |
30 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
31 | )
32 |
// Kubebuilder-generated envtest scaffolding: exercises a single Reconcile
// pass against a minimal TypesenseCluster created in the "default" namespace.
// k8sClient is the suite-level client set up in suite_test (not visible here).
var _ = Describe("TypesenseCluster Controller", func() {
	Context("When reconciling a resource", func() {
		const resourceName = "test-resource"

		ctx := context.Background()

		// Key under which the test resource is created and looked up.
		typeNamespacedName := types.NamespacedName{
			Name: resourceName,
			Namespace: "default", // TODO(user):Modify as needed
		}
		typesensecluster := &tsv1alpha1.TypesenseCluster{}

		BeforeEach(func() {
			By("creating the custom resource for the Kind TypesenseCluster")
			// Create the CR only if it does not already exist; any other Get
			// error is silently ignored here (scaffolding behavior).
			err := k8sClient.Get(ctx, typeNamespacedName, typesensecluster)
			if err != nil && errors.IsNotFound(err) {
				resource := &tsv1alpha1.TypesenseCluster{
					ObjectMeta: metav1.ObjectMeta{
						Name: resourceName,
						Namespace: "default",
					},
					// TODO(user): Specify other spec details if needed.
				}
				Expect(k8sClient.Create(ctx, resource)).To(Succeed())
			}
		})

		AfterEach(func() {
			// TODO(user): Cleanup logic after each test, like removing the resource instance.
			resource := &tsv1alpha1.TypesenseCluster{}
			err := k8sClient.Get(ctx, typeNamespacedName, resource)
			Expect(err).NotTo(HaveOccurred())

			By("Cleanup the specific resource instance TypesenseCluster")
			Expect(k8sClient.Delete(ctx, resource)).To(Succeed())
		})
		It("should successfully reconcile the resource", func() {
			By("Reconciling the created resource")
			// NOTE(review): the reconciler is constructed with only Client and
			// Scheme — Recorder, DiscoveryClient and logger are zero values.
			// Confirm Reconcile tolerates that before extending this test.
			controllerReconciler := &TypesenseClusterReconciler{
				Client: k8sClient,
				Scheme: k8sClient.Scheme(),
			}

			_, err := controllerReconciler.Reconcile(ctx, reconcile.Request{
				NamespacedName: typeNamespacedName,
			})
			Expect(err).NotTo(HaveOccurred())
			// TODO(user): Add more specific assertions depending on your controller's reconciliation logic.
			// Example: If you expect a certain status condition after reconciliation, verify it here.
		})
	})
})
85 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_helpers.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
6 | "sigs.k8s.io/controller-runtime/pkg/client"
7 | )
8 |
9 | func (r *TypesenseClusterReconciler) patchStatus(
10 | ctx context.Context,
11 | ts *tsv1alpha1.TypesenseCluster,
12 | patcher func(status *tsv1alpha1.TypesenseClusterStatus),
13 | ) error {
14 | patch := client.MergeFrom(ts.DeepCopy())
15 | patcher(&ts.Status)
16 |
17 | err := r.Status().Patch(ctx, ts, patch)
18 | if err != nil {
19 | r.logger.Error(err, "unable to patch typesense cluster status")
20 | return err
21 | }
22 |
23 | return nil
24 | }
25 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_podmonitor.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
7 | monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
8 | appsv1 "k8s.io/api/apps/v1"
9 | apierrors "k8s.io/apimachinery/pkg/api/errors"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | ctrl "sigs.k8s.io/controller-runtime"
12 | "sigs.k8s.io/controller-runtime/pkg/client"
13 | )
14 |
15 | const prometheusApiGroup = "monitoring.coreos.com"
16 |
17 | func (r *TypesenseClusterReconciler) ReconcilePodMonitor(ctx context.Context, ts tsv1alpha1.TypesenseCluster) error {
18 | // TODO Remove in future version 0.2.15+
19 | r.deleteMetricsExporterServiceMonitor(ctx, ts)
20 |
21 | if deployed, err := r.IsPrometheusDeployed(); err != nil || !deployed {
22 | if ts.Spec.Metrics != nil {
23 | err := fmt.Errorf("prometheus api group %s was not found in cluster", prometheusApiGroup)
24 | r.logger.Error(err, "reconciling podmonitor skipped")
25 | }
26 | return nil
27 | }
28 |
29 | r.logger.V(debugLevel).Info("reconciling podmonitor")
30 |
31 | podMonitorName := fmt.Sprintf(ClusterMetricsPodMonitor, ts.Name)
32 | podMonitorExists := true
33 | podMonitorObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: podMonitorName}
34 |
35 | var podMonitor = &monitoringv1.PodMonitor{}
36 | if err := r.Get(ctx, podMonitorObjectKey, podMonitor); err != nil {
37 | if apierrors.IsNotFound(err) {
38 | podMonitorExists = false
39 | } else {
40 | r.logger.Error(err, fmt.Sprintf("unable to fetch podmonitor: %s", podMonitorName))
41 | return err
42 | }
43 | }
44 |
45 | if ts.Spec.Metrics == nil {
46 | if podMonitorExists {
47 | err := r.deleteMetricsExporterPodMonitor(ctx, podMonitor)
48 | if err != nil {
49 | return err
50 | }
51 | }
52 |
53 | return nil
54 | }
55 |
56 | if !podMonitorExists {
57 | r.logger.V(debugLevel).Info("creating podmonitor", "podmonitor", podMonitorObjectKey.Name)
58 |
59 | err := r.createMetricsExporterPodMonitor(ctx, podMonitorObjectKey, &ts)
60 | if err != nil {
61 | r.logger.Error(err, "creating podmonitor failed", "podmonitor", podMonitorObjectKey.Name)
62 | return err
63 | }
64 | } else {
65 | if ts.Spec.Metrics.Release != podMonitor.ObjectMeta.Labels["release"] || monitoringv1.Duration(fmt.Sprintf("%ds", ts.Spec.Metrics.IntervalInSeconds)) != podMonitor.Spec.PodMetricsEndpoints[0].Interval {
66 | r.logger.V(debugLevel).Info("updating podmonitor", "podmonitor", podMonitorObjectKey.Name)
67 |
68 | err := r.deleteMetricsExporterPodMonitor(ctx, podMonitor)
69 | if err != nil {
70 | r.logger.Error(err, "deleting podmonitor failed", "podmonitor", podMonitorObjectKey.Name)
71 | return err
72 | }
73 |
74 | err = r.createMetricsExporterPodMonitor(ctx, podMonitorObjectKey, &ts)
75 | if err != nil {
76 | r.logger.Error(err, "creating podmonitor failed", "podmonitor", podMonitorObjectKey.Name)
77 | return err
78 | }
79 | }
80 | }
81 |
82 | return nil
83 | }
84 |
85 | func (r *TypesenseClusterReconciler) createMetricsExporterPodMonitor(ctx context.Context, key client.ObjectKey, ts *tsv1alpha1.TypesenseCluster) error {
86 | objectMeta := getPodMonitorObjectMeta(ts, &key.Name, nil)
87 | objectMeta.Labels["release"] = ts.Spec.Metrics.Release
88 |
89 | podMonitor := &monitoringv1.PodMonitor{
90 | ObjectMeta: objectMeta,
91 | Spec: monitoringv1.PodMonitorSpec{
92 | Selector: metav1.LabelSelector{
93 | MatchLabels: getLabels(ts),
94 | },
95 | NamespaceSelector: monitoringv1.NamespaceSelector{
96 | MatchNames: []string{ts.Namespace},
97 | },
98 | PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{
99 | {
100 | Port: "metrics",
101 | Path: "/metrics",
102 | Interval: monitoringv1.Duration(fmt.Sprintf("%ds", ts.Spec.Metrics.IntervalInSeconds)),
103 | Scheme: "http",
104 | },
105 | },
106 | },
107 | }
108 |
109 | err := ctrl.SetControllerReference(ts, podMonitor, r.Scheme)
110 | if err != nil {
111 | return err
112 | }
113 |
114 | err = r.Create(ctx, podMonitor)
115 | if err != nil {
116 | return err
117 | }
118 |
119 | return nil
120 | }
121 |
122 | func (r *TypesenseClusterReconciler) deleteMetricsExporterPodMonitor(ctx context.Context, podMonitor *monitoringv1.PodMonitor) error {
123 | err := r.Delete(ctx, podMonitor)
124 | if err != nil {
125 | return err
126 | }
127 |
128 | return nil
129 | }
130 |
131 | func (r *TypesenseClusterReconciler) IsPrometheusDeployed() (bool, error) {
132 | apiGroupList, err := r.DiscoveryClient.ServerGroups()
133 | if err != nil {
134 | return false, err
135 | }
136 |
137 | for _, apiGroup := range apiGroupList.Groups {
138 | if apiGroup.Name == prometheusApiGroup {
139 | return true, nil
140 | }
141 | }
142 |
143 | return false, nil
144 | }
145 |
146 | // TODO Remove in future version 0.2.15+
147 | func (r *TypesenseClusterReconciler) deleteMetricsExporterServiceMonitor(ctx context.Context, ts tsv1alpha1.TypesenseCluster) {
148 | deploymentName := fmt.Sprintf(ClusterPrometheusExporterDeployment, ts.Name)
149 | deploymentExists := true
150 | deploymentObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: deploymentName}
151 |
152 | var deployment = &appsv1.Deployment{}
153 | if err := r.Get(ctx, deploymentObjectKey, deployment); err != nil {
154 | if apierrors.IsNotFound(err) {
155 | deploymentExists = false
156 | } else {
157 | r.logger.V(debugLevel).Error(err, fmt.Sprintf("unable to fetch metrics exporter deployment: %s", deploymentName))
158 | }
159 | }
160 |
161 | if deploymentExists {
162 | err := r.deleteMetricsExporterDeployment(ctx, deployment)
163 | if err != nil {
164 | r.logger.V(debugLevel).Error(err, fmt.Sprintf("unable to cleanup metrics exporter deployment: %s", deploymentName))
165 | }
166 | }
167 | }
168 |
169 | // TODO Remove in future version 0.2.15+
170 | func (r *TypesenseClusterReconciler) deleteMetricsExporterDeployment(ctx context.Context, deployment *appsv1.Deployment) error {
171 | err := r.Delete(ctx, deployment)
172 | if err != nil {
173 | return err
174 | }
175 |
176 | return nil
177 | }
178 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_quorum.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
7 | v1 "k8s.io/api/core/v1"
8 | "k8s.io/utils/ptr"
9 | "net/http"
10 | "sigs.k8s.io/controller-runtime/pkg/client"
11 | "sort"
12 | "strconv"
13 | "time"
14 | )
15 |
16 | const (
17 | QuorumReadinessGateCondition = "RaftQuorumReady"
18 | HealthyWriteLagKey = "TYPESENSE_HEALTHY_WRITE_LAG"
19 | HealthyWriteLagDefaultValue = 500
20 | HealthyReadLagKey = "TYPESENSE_HEALTHY_READ_LAG"
21 | HealthyReadLagDefaultValue = 1000
22 | )
23 |
// ReconcileQuorum inspects the health of every typesense node, publishes each
// pod's RaftQuorumReady readiness gate, and decides the next quorum action:
// keep as-is, upgrade (scale out), downgrade (scale to one), or purge pods.
// It returns the resulting quorum condition, the (possibly new) configured
// node count, and an error only for non-recoverable reconcile failures.
func (r *TypesenseClusterReconciler) ReconcileQuorum(ctx context.Context, ts *tsv1alpha1.TypesenseCluster, secret *v1.Secret, stsObjectKey client.ObjectKey) (ConditionQuorum, int, error) {
	r.logger.Info("reconciling quorum health")

	sts, err := r.GetFreshStatefulSet(ctx, stsObjectKey)
	if err != nil {
		return ConditionReasonQuorumNotReady, 0, err
	}

	quorum, err := r.getQuorum(ctx, ts, sts)
	if err != nil {
		return ConditionReasonQuorumNotReady, 0, err
	}

	r.logger.Info("calculated quorum", "minRequiredNodes", quorum.MinRequiredNodes, "availableNodes", quorum.AvailableNodes)

	nodesStatus := make(map[string]NodeStatus)
	// Short per-probe timeout so status checks cannot stall the reconcile loop.
	httpClient := &http.Client{
		Timeout: 500 * time.Millisecond,
	}

	queuedWrites := 0
	healthyWriteLagThreshold := r.getHealthyWriteLagThreshold(ctx, ts)

	// Sort node names for a deterministic probe/report order.
	nodeKeys := make([]string, 0, len(quorum.Nodes))
	for k := range quorum.Nodes {
		nodeKeys = append(nodeKeys, k)
	}
	sort.Strings(nodeKeys)

	//quorum.Nodes is coming straight from the PodList of Statefulset
	// First pass: collect each node's raft status and track the largest
	// queued_writes backlog seen across the cluster.
	for _, key := range nodeKeys {
		node := key
		ip := quorum.Nodes[key]
		ne := NodeEndpoint{
			PodName: node,
			IP: ip,
		}

		// getNodeStatus maps transport failures to synthetic states, so a
		// non-nil err here is only logged, not propagated.
		status, err := r.getNodeStatus(ctx, httpClient, ne, ts, secret)
		if err != nil {
			r.logger.Error(err, "fetching node status failed", "node", r.getShortName(node), "ip", ip)
		}

		if status.QueuedWrites > 0 && queuedWrites < status.QueuedWrites {
			queuedWrites = status.QueuedWrites
		}

		r.logger.V(debugLevel).Info(
			"reporting node status",
			"node",
			r.getShortName(node),
			"state",
			status.State,
			"ip",
			ip,
			"queued_writes",
			status.QueuedWrites,
			"commited_index",
			status.CommittedIndex,
		)
		nodesStatus[node] = status
	}

	clusterStatus := r.getClusterStatus(nodesStatus)
	r.logger.V(debugLevel).Info("reporting cluster status", "status", clusterStatus)

	// Two leaders at once: force a downgrade to a single node to re-form raft.
	if clusterStatus == ClusterStatusSplitBrain {
		return r.downgradeQuorum(ctx, ts, quorum.NodesListConfigMap, stsObjectKey, sts.Status.ReadyReplicas, int32(quorum.MinRequiredNodes))
	}

	clusterNeedsAttention := false
	nodesHealth := make(map[string]bool)

	// Second pass: compute and publish each pod's readiness gate condition.
	for o, key := range nodeKeys {
		node := key
		ip := quorum.Nodes[key]
		ne := NodeEndpoint{
			PodName: node,
			IP: ip,
		}
		nodeStatus := nodesStatus[node]

		condition := r.calculatePodReadinessGate(ctx, httpClient, ne, nodeStatus, ts)
		if condition.Reason == string(nodeNotRecoverable) {
			clusterNeedsAttention = true
		}

		// condition.Status is "True"/"False", which ParseBool accepts.
		nodesHealth[node], _ = strconv.ParseBool(string(condition.Status))

		// NOTE(review): o is the index into the lexicographically sorted node
		// names; with 10+ replicas ("-10" sorts before "-2") this ordinal may
		// not match the pod whose condition was just computed — confirm.
		podName := fmt.Sprintf("%s-%d", fmt.Sprintf(ClusterStatefulSet, ts.Name), o)
		podObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: podName}

		err = r.updatePodReadinessGate(ctx, podObjectKey, condition)
		if err != nil {
			r.logger.Error(err, fmt.Sprintf("unable to update statefulset pod: %s", podObjectKey.Name))
			return ConditionReasonQuorumNotReady, 0, err
		}
	}

	// A node reported OUT_OF_MEMORY/OUT_OF_DISK: no automatic recovery possible.
	if clusterNeedsAttention {
		return ConditionReasonQuorumNeedsAttentionMemoryOrDiskIssue, 0, nil
	}

	minRequiredNodes := quorum.MinRequiredNodes
	availableNodes := quorum.AvailableNodes
	healthyNodes := availableNodes

	for _, healthy := range nodesHealth {
		if !healthy {
			healthyNodes--
		}
	}

	r.logger.Info("evaluated quorum", "minRequiredNodes", minRequiredNodes, "availableNodes", availableNodes, "healthyNodes", healthyNodes)

	// Write backlog above the configured threshold: report lag, take no action.
	if queuedWrites > healthyWriteLagThreshold {
		return ConditionReasonQuorumNeedsAttentionClusterIsLagging, 0, nil
	}

	// No leader elected among multiple nodes: downgrade to break the deadlock.
	if clusterStatus == ClusterStatusElectionDeadlock {
		return r.downgradeQuorum(ctx, ts, quorum.NodesListConfigMap, stsObjectKey, int32(healthyNodes), int32(minRequiredNodes))
	}

	if clusterStatus == ClusterStatusNotReady {
		if availableNodes == 1 {

			// Single-node quorum: if that node is in a synthetic failure state,
			// recreate its pod; otherwise give it another raft term to recover.
			podName := fmt.Sprintf("%s-%d", fmt.Sprintf(ClusterStatefulSet, ts.Name), 0)
			nodeStatus := nodesStatus[podName]
			state := nodeStatus.State

			if state == ErrorState || state == UnreachableState {
				r.logger.Info("purging quorum")
				err := r.PurgeStatefulSetPods(ctx, sts)
				if err != nil {
					return ConditionReasonQuorumNotReady, 0, err
				}

				return ConditionReasonQuorumNotReady, 0, nil
			}

			return ConditionReasonQuorumNotReadyWaitATerm, 0, nil
		}

		if minRequiredNodes > healthyNodes {
			return ConditionReasonQuorumNotReadyWaitATerm, 0, nil
		}
	}

	// Healthy but under-sized: scale out once the write backlog has drained.
	if clusterStatus == ClusterStatusOK && *sts.Spec.Replicas < ts.Spec.Replicas {
		if queuedWrites > 0 {
			return ConditionReasonQuorumQueuedWrites, 0, nil
		}

		return r.upgradeQuorum(ctx, ts, quorum.NodesListConfigMap, stsObjectKey)
	}

	if healthyNodes < minRequiredNodes {
		return ConditionReasonQuorumNotReady, 0, nil
	}

	return ConditionReasonQuorumReady, 0, nil
}
186 |
187 | func (r *TypesenseClusterReconciler) downgradeQuorum(
188 | ctx context.Context,
189 | ts *tsv1alpha1.TypesenseCluster,
190 | cm *v1.ConfigMap,
191 | stsObjectKey client.ObjectKey,
192 | healthyNodes, minRequiredNodes int32,
193 | ) (ConditionQuorum, int, error) {
194 | r.logger.Info("downgrading quorum")
195 |
196 | sts, err := r.GetFreshStatefulSet(ctx, stsObjectKey)
197 | if err != nil {
198 | return ConditionReasonQuorumNotReady, 0, err
199 | }
200 |
201 | if healthyNodes == 0 && minRequiredNodes == 1 {
202 | r.logger.Info("purging quorum")
203 | err := r.PurgeStatefulSetPods(ctx, sts)
204 | if err != nil {
205 | return ConditionReasonQuorumNotReady, 0, err
206 | }
207 |
208 | return ConditionReasonQuorumNotReady, 0, nil
209 | }
210 |
211 | desiredReplicas := int32(1)
212 |
213 | err = r.ScaleStatefulSet(ctx, stsObjectKey, desiredReplicas)
214 | if err != nil {
215 | return ConditionReasonQuorumNotReady, 0, err
216 | }
217 |
218 | _, size, err := r.updateConfigMap(ctx, ts, cm, ptr.To[int32](desiredReplicas))
219 | if err != nil {
220 | return ConditionReasonQuorumNotReady, 0, err
221 | }
222 |
223 | return ConditionReasonQuorumDowngraded, size, nil
224 | }
225 |
226 | func (r *TypesenseClusterReconciler) upgradeQuorum(
227 | ctx context.Context,
228 | ts *tsv1alpha1.TypesenseCluster,
229 | cm *v1.ConfigMap,
230 | stsObjectKey client.ObjectKey,
231 | ) (ConditionQuorum, int, error) {
232 | r.logger.Info("upgrading quorum", "incremental", ts.Spec.IncrementalQuorumRecovery)
233 |
234 | sts, err := r.GetFreshStatefulSet(ctx, stsObjectKey)
235 | if err != nil {
236 | return ConditionReasonQuorumNotReady, 0, err
237 | }
238 | size := ts.Spec.Replicas
239 | if ts.Spec.IncrementalQuorumRecovery {
240 | size = sts.Status.Replicas + 1
241 | }
242 |
243 | err = r.ScaleStatefulSet(ctx, stsObjectKey, size)
244 | if err != nil {
245 | return ConditionReasonQuorumNotReady, 0, err
246 | }
247 |
248 | _, _, err = r.updateConfigMap(ctx, ts, cm, &size)
249 | if err != nil {
250 | return ConditionReasonQuorumNotReady, 0, err
251 | }
252 |
253 | return ConditionReasonQuorumUpgraded, int(size), nil
254 | }
255 |
// readinessGateReason is the Reason recorded on the RaftQuorumReady pod
// condition by calculatePodReadinessGate.
type readinessGateReason string

const (
	// nodeHealthy: the /health probe succeeded and the node reported ok.
	nodeHealthy readinessGateReason = "NodeHealthy"
	// nodeNotHealthy: the probe failed or the node reported not ok.
	nodeNotHealthy readinessGateReason = "NodeNotHealthy"
	// nodeNotRecoverable: intended to mark a node reporting OUT_OF_MEMORY or
	// OUT_OF_DISK, where automatic quorum recovery is not possible.
	nodeNotRecoverable readinessGateReason = "NodeNotRecoverable"
)
263 |
264 | func (r *TypesenseClusterReconciler) calculatePodReadinessGate(ctx context.Context, httpClient *http.Client, node NodeEndpoint, nodeStatus NodeStatus, ts *tsv1alpha1.TypesenseCluster) *v1.PodCondition {
265 | conditionReason := nodeHealthy
266 | conditionMessage := fmt.Sprintf("node's role is now: %s", nodeStatus.State)
267 | conditionStatus := v1.ConditionTrue
268 |
269 | health, err := r.getNodeHealth(ctx, httpClient, node, ts)
270 | if err != nil {
271 | conditionReason = nodeNotHealthy
272 | conditionStatus = v1.ConditionFalse
273 |
274 | r.logger.Error(err, "fetching node health failed", "node", r.getShortName(node.PodName), "ip", node.IP)
275 | } else {
276 | if !health.Ok {
277 | if health.ResourceError != nil && (*health.ResourceError == OutOfMemory || *health.ResourceError == OutOfDisk) {
278 | conditionReason = nodeNotRecoverable
279 | conditionMessage = fmt.Sprintf("node is failing: %s", string(*health.ResourceError))
280 | conditionStatus = v1.ConditionFalse
281 |
282 | err := fmt.Errorf("health check reported a blocking node error on %s: %s", r.getShortName(node.PodName), string(*health.ResourceError))
283 | r.logger.Error(err, "quorum cannot be recovered automatically")
284 | }
285 |
286 | conditionReason = nodeNotHealthy
287 | conditionStatus = v1.ConditionFalse
288 | }
289 | }
290 |
291 | r.logger.V(debugLevel).Info("reporting node health", "node", r.getShortName(node.PodName), "healthy", health.Ok, "ip", node.IP)
292 | condition := &v1.PodCondition{
293 | Type: QuorumReadinessGateCondition,
294 | Status: conditionStatus,
295 | Reason: string(conditionReason),
296 | Message: conditionMessage,
297 | }
298 |
299 | return condition
300 | }
301 |
302 | func (r *TypesenseClusterReconciler) updatePodReadinessGate(ctx context.Context, podObjectKey client.ObjectKey, condition *v1.PodCondition) error {
303 |
304 | pod := &v1.Pod{}
305 | err := r.Get(ctx, podObjectKey, pod)
306 | if err != nil {
307 | r.logger.Error(err, fmt.Sprintf("unable to fetch statefulset pod: %s", podObjectKey.Name))
308 | return nil
309 | }
310 |
311 | patch := client.MergeFrom(pod.DeepCopy())
312 |
313 | found := false
314 | var updatedConditions []v1.PodCondition
315 | for _, c := range pod.Status.Conditions {
316 | if c.Type == condition.Type {
317 | if !found {
318 | updatedConditions = append(updatedConditions, *condition)
319 | found = true
320 | }
321 | } else {
322 | updatedConditions = append(updatedConditions, c)
323 | }
324 | }
325 | if !found {
326 | updatedConditions = append(updatedConditions, *condition)
327 | }
328 |
329 | pod.Status.Conditions = updatedConditions
330 |
331 | if err := r.Status().Patch(ctx, pod, patch); err != nil {
332 | r.logger.Error(err, "updating pod readiness gate condition failed", "pod", pod.Name)
333 | return err
334 | }
335 |
336 | return nil
337 | }
338 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_quorum_helpers.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | "encoding/json"
6 | "fmt"
7 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
8 | "io"
9 | appsv1 "k8s.io/api/apps/v1"
10 | v1 "k8s.io/api/core/v1"
11 | "k8s.io/apimachinery/pkg/labels"
12 | "net"
13 | "net/http"
14 | "sigs.k8s.io/controller-runtime/pkg/client"
15 | "strconv"
16 | "strings"
17 | )
18 |
19 | func (r *TypesenseClusterReconciler) getNodeStatus(ctx context.Context, httpClient *http.Client, node NodeEndpoint, ts *tsv1alpha1.TypesenseCluster, secret *v1.Secret) (NodeStatus, error) {
20 | fqdn := r.getNodeEndpoint(ts, node.IP.String())
21 | url := fmt.Sprintf("http://%s:%d/status", fqdn, ts.Spec.ApiPort)
22 |
23 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
24 | if err != nil {
25 | r.logger.Error(err, "creating request failed")
26 | return NodeStatus{State: ErrorState}, nil
27 | }
28 |
29 | apiKey := secret.Data[ClusterAdminApiKeySecretKeyName]
30 | req.Header.Set("x-typesense-api-key", string(apiKey))
31 |
32 | resp, err := httpClient.Do(req)
33 | if err != nil {
34 | r.logger.Error(err, "request failed")
35 | return NodeStatus{State: UnreachableState}, nil
36 | }
37 | defer resp.Body.Close()
38 |
39 | if resp.StatusCode != http.StatusOK {
40 | r.logger.Error(err, "error executing request", "httpStatusCode", resp.StatusCode)
41 | }
42 |
43 | body, err := io.ReadAll(resp.Body)
44 | if err != nil {
45 | return NodeStatus{State: ErrorState}, nil
46 | }
47 |
48 | var nodeStatus NodeStatus
49 | err = json.Unmarshal(body, &nodeStatus)
50 | if err != nil {
51 | return NodeStatus{State: ErrorState}, nil
52 | }
53 |
54 | return nodeStatus, nil
55 | }
56 |
57 | func (r *TypesenseClusterReconciler) getClusterStatus(nodesStatus map[string]NodeStatus) ClusterStatus {
58 | leaderNodes := 0
59 | notReadyNodes := 0
60 | availableNodes := len(nodesStatus)
61 | minRequiredNodes := getMinimumRequiredNodes(availableNodes)
62 |
63 | for _, nodeStatus := range nodesStatus {
64 | if nodeStatus.State == LeaderState {
65 | leaderNodes++
66 | }
67 |
68 | if nodeStatus.State == NotReadyState || nodeStatus.State == UnreachableState {
69 | notReadyNodes++
70 | }
71 | }
72 |
73 | if leaderNodes > 1 {
74 | return ClusterStatusSplitBrain
75 | }
76 |
77 | if leaderNodes == 0 {
78 | if availableNodes == 1 {
79 | return ClusterStatusNotReady
80 | } // here is setting as not ready even if the single node returns state ERROR
81 | return ClusterStatusElectionDeadlock
82 | }
83 |
84 | if leaderNodes == 1 {
85 | if minRequiredNodes > (availableNodes - notReadyNodes) {
86 | return ClusterStatusNotReady
87 | }
88 | return ClusterStatusOK
89 | }
90 |
91 | return ClusterStatusNotReady
92 | }
93 |
94 | func (r *TypesenseClusterReconciler) getNodeHealth(ctx context.Context, httpClient *http.Client, node NodeEndpoint, ts *tsv1alpha1.TypesenseCluster) (NodeHealth, error) {
95 | fqdn := r.getNodeEndpoint(ts, node.IP.String())
96 | url := fmt.Sprintf("http://%s:%d/health", fqdn, ts.Spec.ApiPort)
97 |
98 | req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
99 | if err != nil {
100 | r.logger.Error(err, "creating request failed")
101 | return NodeHealth{Ok: false}, nil
102 | }
103 |
104 | resp, err := httpClient.Do(req)
105 | if err != nil {
106 | r.logger.Error(err, "request failed")
107 | return NodeHealth{Ok: false}, nil
108 | }
109 | defer resp.Body.Close()
110 |
111 | body, err := io.ReadAll(resp.Body)
112 | if err != nil {
113 | return NodeHealth{Ok: false}, nil
114 | }
115 |
116 | var nodeHealth NodeHealth
117 | err = json.Unmarshal(body, &nodeHealth)
118 | if err != nil {
119 | return NodeHealth{Ok: false}, nil
120 | }
121 |
122 | return nodeHealth, nil
123 | }
124 |
125 | func (r *TypesenseClusterReconciler) getQuorum(ctx context.Context, ts *tsv1alpha1.TypesenseCluster, sts *appsv1.StatefulSet) (*Quorum, error) {
126 | configMapName := fmt.Sprintf(ClusterNodesConfigMap, ts.Name)
127 | configMapObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: configMapName}
128 |
129 | var cm = &v1.ConfigMap{}
130 | if err := r.Get(ctx, configMapObjectKey, cm); err != nil {
131 | r.logger.Error(err, fmt.Sprintf("unable to fetch config map: %s", configMapName))
132 | return &Quorum{}, err
133 | }
134 |
135 | nodes := strings.Split(cm.Data["nodes"], ",")
136 | availableNodes := len(nodes)
137 | minRequiredNodes := getMinimumRequiredNodes(availableNodes)
138 |
139 | var pods v1.PodList
140 | labelSelector := labels.SelectorFromSet(sts.Spec.Selector.MatchLabels)
141 | if err := r.List(ctx, &pods, &client.ListOptions{
142 | Namespace: sts.Namespace,
143 | LabelSelector: labelSelector,
144 | }); err != nil {
145 | r.logger.Error(err, "failed to list pods", "statefulset", sts.Name)
146 | return nil, err
147 | }
148 |
149 | qn := make(map[string]net.IP)
150 |
151 | for _, pod := range pods.Items {
152 | if pod.Status.PodIP != "" {
153 | raftEndpoint := fmt.Sprintf("%s:%d:%d", pod.Status.PodIP, ts.Spec.PeeringPort, ts.Spec.ApiPort)
154 | if _, contains := contains(nodes, raftEndpoint); contains {
155 | qn[pod.Name] = net.ParseIP(pod.Status.PodIP)
156 | }
157 | }
158 | }
159 |
160 | return &Quorum{minRequiredNodes, availableNodes, qn, cm}, nil
161 | }
162 |
// getMinimumRequiredNodes returns the quorum size used for the given number
// of configured nodes, computed as floor((n-1)/2)+1.
// NOTE(review): for even n this yields n/2 (e.g. 2 for n=4), not the strict
// raft majority n/2+1 — presumably clusters are always sized with odd replica
// counts; confirm before relying on even sizes.
func getMinimumRequiredNodes(availableNodes int) int {
	half := (availableNodes - 1) / 2
	return half + 1
}
166 |
167 | func (r *TypesenseClusterReconciler) getHealthyWriteLagThreshold(ctx context.Context, ts *tsv1alpha1.TypesenseCluster) int {
168 | if ts.Spec.AdditionalServerConfiguration == nil {
169 | return HealthyWriteLagDefaultValue
170 | }
171 |
172 | configMapName := ts.Spec.AdditionalServerConfiguration.Name
173 | configMapObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: configMapName}
174 |
175 | var cm = &v1.ConfigMap{}
176 | if err := r.Get(ctx, configMapObjectKey, cm); err != nil {
177 | r.logger.Error(err, "unable to additional server configuration config map", "configMap", configMapName)
178 | return HealthyWriteLagDefaultValue
179 | }
180 |
181 | healthyWriteLagValue := cm.Data[HealthyWriteLagKey]
182 | if healthyWriteLagValue == "" {
183 | return HealthyWriteLagDefaultValue
184 | }
185 |
186 | healthyWriteLag, err := strconv.Atoi(healthyWriteLagValue)
187 | if err != nil {
188 | r.logger.Error(err, "unable to parse server configuration value", "configMap", configMapName, "key", HealthyWriteLagKey)
189 | return HealthyWriteLagDefaultValue
190 | }
191 |
192 | return healthyWriteLag
193 | }
194 |
195 | func (r *TypesenseClusterReconciler) getHealthyReadLagThreshold(ctx context.Context, ts *tsv1alpha1.TypesenseCluster) int {
196 | if ts.Spec.AdditionalServerConfiguration == nil {
197 | return HealthyReadLagDefaultValue
198 | }
199 |
200 | configMapName := ts.Spec.AdditionalServerConfiguration.Name
201 | configMapObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: configMapName}
202 |
203 | var cm = &v1.ConfigMap{}
204 | if err := r.Get(ctx, configMapObjectKey, cm); err != nil {
205 | r.logger.Error(err, "unable to additional server configuration config map", "configMap", configMapName)
206 | return HealthyReadLagDefaultValue
207 | }
208 |
209 | healthyReadLagValue := cm.Data[HealthyReadLagKey]
210 | if healthyReadLagValue == "" {
211 | return HealthyReadLagDefaultValue
212 | }
213 |
214 | healthyReadLag, err := strconv.Atoi(healthyReadLagValue)
215 | if err != nil {
216 | r.logger.Error(err, "unable to parse server configuration value", "configMap", configMapName, "key", HealthyReadLagKey)
217 | return HealthyReadLagDefaultValue
218 | }
219 |
220 | return healthyReadLag
221 | }
222 |
223 | func (r *TypesenseClusterReconciler) getHealthyLagThresholds(ctx context.Context, ts *tsv1alpha1.TypesenseCluster) (read int, write int) {
224 | read = HealthyReadLagDefaultValue
225 | write = HealthyWriteLagDefaultValue
226 |
227 | if ts.Spec.AdditionalServerConfiguration == nil {
228 | return
229 | }
230 |
231 | configMapName := ts.Spec.AdditionalServerConfiguration.Name
232 | configMapObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: configMapName}
233 |
234 | var cm = &v1.ConfigMap{}
235 | if err := r.Get(ctx, configMapObjectKey, cm); err != nil {
236 | r.logger.Error(err, "unable to additional server configuration config map", "configMap", configMapName)
237 | return
238 | }
239 |
240 | healthyReadLagValue := cm.Data[HealthyReadLagKey]
241 | if healthyReadLagValue == "" {
242 | healthyReadLagValue = strconv.Itoa(HealthyReadLagDefaultValue)
243 | }
244 |
245 | healthyWriteLagValue := cm.Data[HealthyWriteLagKey]
246 | if healthyWriteLagValue == "" {
247 | healthyWriteLagValue = strconv.Itoa(HealthyWriteLagDefaultValue)
248 | }
249 |
250 | healthyReadLag, err := strconv.Atoi(healthyReadLagValue)
251 | if err != nil {
252 | r.logger.Error(err, "unable to parse server configuration value", "configMap", configMapName, "key", HealthyReadLagKey)
253 | }
254 |
255 | healthyWriteLag, err := strconv.Atoi(healthyWriteLagValue)
256 | if err != nil {
257 | r.logger.Error(err, "unable to parse server configuration value", "configMap", configMapName, "key", HealthyWriteLagKey)
258 | }
259 |
260 | read = healthyReadLag
261 | write = healthyWriteLag
262 |
263 | return
264 | }
265 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_quorum_types.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | v1 "k8s.io/api/core/v1"
5 | "net"
6 | )
7 |
// NodeState is the raft state of a single typesense node as reported by its
// /status endpoint. ErrorState and UnreachableState are synthetic states set
// by the operator when the status request itself cannot be built, sent, or
// its response decoded (see getNodeStatus).
type NodeState string

const (
	LeaderState NodeState = "LEADER"
	FollowerState NodeState = "FOLLOWER"
	CandidateState NodeState = "CANDIDATE"
	NotReadyState NodeState = "NOT_READY"
	// ErrorState: operator-side failure building the request or decoding the body.
	ErrorState NodeState = "ERROR"
	// UnreachableState: the HTTP request to the node failed outright.
	UnreachableState NodeState = "UNREACHABLE"
)

// NodeStatus is the decoded payload of a node's /status response.
type NodeStatus struct {
	CommittedIndex int `json:"committed_index"`
	QueuedWrites int `json:"queued_writes"`
	State NodeState `json:"state"`
}

// ClusterStatus is the aggregate health verdict derived from all node
// statuses (see getClusterStatus).
type ClusterStatus string

const (
	ClusterStatusOK ClusterStatus = "OK"
	// ClusterStatusSplitBrain: more than one node claims leadership.
	ClusterStatusSplitBrain ClusterStatus = "SPLIT_BRAIN"
	ClusterStatusNotReady ClusterStatus = "NOT_READY"
	// ClusterStatusElectionDeadlock: multiple nodes but no elected leader.
	ClusterStatusElectionDeadlock ClusterStatus = "ELECTION_DEADLOCK"
)

// NodeHealthResourceError is a blocking resource condition a node may report
// on /health; either value makes the node non-recoverable automatically.
type NodeHealthResourceError string

const (
	OutOfMemory NodeHealthResourceError = "OUT_OF_MEMORY"
	OutOfDisk NodeHealthResourceError = "OUT_OF_DISK"
)

// NodeHealth is the decoded payload of a node's /health response.
type NodeHealth struct {
	Ok bool `json:"ok"`
	ResourceError *NodeHealthResourceError `json:"resource_error,omitempty"`
}

// NodeEndpoint pairs a statefulset pod name with its current pod IP.
type NodeEndpoint struct {
	PodName string
	IP net.IP
}

// Quorum is the reconciled view of the raft quorum: the majority size
// required, the node count configured in the nodes ConfigMap, the pods that
// are members of that configuration, and the ConfigMap itself.
type Quorum struct {
	MinRequiredNodes int
	AvailableNodes int
	Nodes map[string]net.IP
	NodesListConfigMap *v1.ConfigMap
}
57 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_scraper.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
7 | batchv1 "k8s.io/api/batch/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | apierrors "k8s.io/apimachinery/pkg/api/errors"
10 | "k8s.io/apimachinery/pkg/api/resource"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "k8s.io/utils/ptr"
13 | ctrl "sigs.k8s.io/controller-runtime"
14 | "sigs.k8s.io/controller-runtime/pkg/client"
15 | "strconv"
16 | )
17 |
18 | func (r *TypesenseClusterReconciler) ReconcileScraper(ctx context.Context, ts tsv1alpha1.TypesenseCluster) (err error) {
19 | r.logger.V(debugLevel).Info("reconciling scrapers")
20 |
21 | labelSelector := getLabels(&ts)
22 | listOptions := []client.ListOption{
23 | client.InNamespace(ts.Namespace),
24 | client.MatchingLabels(labelSelector),
25 | }
26 |
27 | var scraperCronJobs batchv1.CronJobList
28 | if err := r.List(ctx, &scraperCronJobs, listOptions...); err != nil {
29 | return err
30 | }
31 |
32 | inSpecs := func(cronJobName string, scrapers []tsv1alpha1.DocSearchScraperSpec) bool {
33 | for _, scraper := range scrapers {
34 | if cronJobName == fmt.Sprintf(ClusterScraperCronJob, scraper.Name) {
35 | return true
36 | }
37 | }
38 |
39 | return false
40 | }
41 |
42 | for _, scraperCronJob := range scraperCronJobs.Items {
43 | if ts.Spec.Scrapers == nil || !inSpecs(scraperCronJob.Name, ts.Spec.Scrapers) {
44 | err = r.deleteScraper(ctx, &scraperCronJob)
45 | if err != nil {
46 | return err
47 | }
48 | }
49 | }
50 |
51 | if ts.Spec.Scrapers == nil {
52 | return nil
53 | }
54 |
55 | for _, scraper := range ts.Spec.Scrapers {
56 | scraperName := fmt.Sprintf(ClusterScraperCronJob, scraper.Name)
57 | scraperExists := true
58 | scraperObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: scraperName}
59 |
60 | var scraperCronJob = &batchv1.CronJob{}
61 | if err := r.Get(ctx, scraperObjectKey, scraperCronJob); err != nil {
62 | if apierrors.IsNotFound(err) {
63 | scraperExists = false
64 | } else {
65 | r.logger.Error(err, fmt.Sprintf("unable to fetch scraper cronjob: %s", scraperObjectKey))
66 | return err
67 | }
68 | }
69 |
70 | if !scraperExists {
71 | r.logger.V(debugLevel).Info("creating scraper cronjob", "cronjob", scraperObjectKey.Name)
72 |
73 | err = r.createScraper(ctx, scraperObjectKey, &ts, &scraper)
74 | if err != nil {
75 | r.logger.Error(err, "creating scraper cronjob failed", "cronjob", scraperObjectKey.Name)
76 | return err
77 | }
78 | } else {
79 | hasChanged := false
80 | hasChangedConfig := false
81 | container := scraperCronJob.Spec.JobTemplate.Spec.Template.Spec.Containers[0]
82 |
83 | for _, env := range container.Env {
84 | if env.Name == "CONFIG" && env.Value != scraper.Config {
85 | hasChangedConfig = true
86 | break
87 | }
88 | }
89 |
90 | if scraperCronJob.Spec.Schedule != scraper.Schedule || container.Image != scraper.Image || hasChangedConfig {
91 | hasChanged = true
92 | }
93 |
94 | if hasChanged {
95 | r.logger.V(debugLevel).Info("updating scraper cronjob", "cronjob", scraperObjectKey.Name)
96 |
97 | err = r.deleteScraper(ctx, scraperCronJob)
98 | if err != nil {
99 | r.logger.Error(err, "deleting scraper cronjob failed", "cronjob", scraperObjectKey.Name)
100 | return err
101 | }
102 |
103 | err = r.createScraper(ctx, scraperObjectKey, &ts, &scraper)
104 | if err != nil {
105 | r.logger.Error(err, "creating scraper cronjob failed", "cronjob", scraperObjectKey.Name)
106 | return err
107 | }
108 | }
109 | }
110 | }
111 |
112 | return nil
113 | }
114 |
// createScraper creates a CronJob that periodically runs the DocSearch scraper
// for one scraper spec. The job forbids concurrent runs, keeps one successful
// and one failed job in history, and does not retry failed runs
// (BackoffLimit 0). The scraper container receives its configuration via the
// CONFIG env var and authenticates against the cluster's REST service with the
// admin API key secret. The CronJob is owned by the TypesenseCluster so it is
// garbage-collected with it.
func (r *TypesenseClusterReconciler) createScraper(ctx context.Context, key client.ObjectKey, ts *tsv1alpha1.TypesenseCluster, scraperSpec *tsv1alpha1.DocSearchScraperSpec) error {
	scraper := &batchv1.CronJob{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "batch/v1",
			Kind:       "CronJob",
		},
		ObjectMeta: getObjectMeta(ts, &key.Name, nil),
		Spec: batchv1.CronJobSpec{
			ConcurrencyPolicy:          batchv1.ForbidConcurrent,
			SuccessfulJobsHistoryLimit: ptr.To[int32](1),
			FailedJobsHistoryLimit:     ptr.To[int32](1),
			Schedule:                   scraperSpec.Schedule,
			JobTemplate: batchv1.JobTemplateSpec{
				Spec: batchv1.JobSpec{
					// no retries: a failed scrape waits for the next scheduled run
					BackoffLimit: ptr.To[int32](0),
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							RestartPolicy: corev1.RestartPolicyNever,
							Containers: []corev1.Container{
								{
									Name:  fmt.Sprintf(ClusterScraperCronJobContainer, scraperSpec.Name),
									Image: scraperSpec.Image,
									Env: []corev1.EnvVar{
										{
											Name:  "CONFIG",
											Value: scraperSpec.Config,
										},
										{
											// admin API key is read from the operator-managed
											// (or user-referenced) secret
											Name: "TYPESENSE_API_KEY",
											ValueFrom: &corev1.EnvVarSource{
												SecretKeyRef: &corev1.SecretKeySelector{
													Key: ClusterAdminApiKeySecretKeyName,
													LocalObjectReference: corev1.LocalObjectReference{
														Name: r.getAdminApiKeyObjectKey(ts).Name,
													},
												},
											},
										},
										{
											Name:  "TYPESENSE_HOST",
											Value: fmt.Sprintf(ClusterRestService, ts.Name),
										},
										{
											Name:  "TYPESENSE_PORT",
											Value: strconv.Itoa(ts.Spec.ApiPort),
										},
										{
											Name:  "TYPESENSE_PROTOCOL",
											Value: "http",
										},
									},
									EnvFrom: scraperSpec.GetScraperAuthConfiguration(),
									Resources: corev1.ResourceRequirements{
										Limits: corev1.ResourceList{
											corev1.ResourceCPU:    resource.MustParse("1024m"),
											corev1.ResourceMemory: resource.MustParse("512Mi"),
										},
										Requests: corev1.ResourceList{
											corev1.ResourceCPU:    resource.MustParse("128m"),
											corev1.ResourceMemory: resource.MustParse("112Mi"),
										},
									},
								},
							},
						},
					},
				},
			},
		},
	}

	err := ctrl.SetControllerReference(ts, scraper, r.Scheme)
	if err != nil {
		return err
	}

	err = r.Create(ctx, scraper)
	if err != nil {
		return err
	}

	return nil
}
198 |
199 | func (r *TypesenseClusterReconciler) deleteScraper(ctx context.Context, scraper *batchv1.CronJob) error {
200 | err := r.Delete(ctx, scraper)
201 | if err != nil {
202 | return err
203 | }
204 |
205 | return nil
206 | }
207 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_secret.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
7 | v1 "k8s.io/api/core/v1"
8 | apierrors "k8s.io/apimachinery/pkg/api/errors"
9 | "k8s.io/utils/ptr"
10 | ctrl "sigs.k8s.io/controller-runtime"
11 | "sigs.k8s.io/controller-runtime/pkg/client"
12 | )
13 |
// ReconcileSecret ensures the admin API key secret for the cluster exists.
// When ts.Spec.AdminApiKey references a user-provided secret, that secret is
// only fetched and its absence is an error; otherwise the operator creates its
// own secret with a freshly generated key on first reconcile.
func (r *TypesenseClusterReconciler) ReconcileSecret(ctx context.Context, ts tsv1alpha1.TypesenseCluster) (*v1.Secret, error) {
	r.logger.V(debugLevel).Info("reconciling secret")

	secretExists := true
	secretObjectKey := r.getAdminApiKeyObjectKey(&ts)

	var secret = &v1.Secret{}
	if err := r.Get(ctx, secretObjectKey, secret); err != nil {
		// NotFound is only acceptable for the operator-managed secret
		// (AdminApiKey == nil); a missing user-referenced secret is an error.
		if apierrors.IsNotFound(err) && ts.Spec.AdminApiKey == nil {
			secretExists = false
		} else {
			r.logger.Error(err, fmt.Sprintf("unable to fetch secret: %s", secretObjectKey))
			return secret, err
		}
	}

	if !secretExists {
		r.logger.V(debugLevel).Info("creating admin api key", "secret", secretObjectKey)

		secret, err := r.createAdminApiKey(ctx, secretObjectKey, &ts)
		if err != nil {
			r.logger.Error(err, "creating admin api key failed", "secret", secretObjectKey)
			return nil, err
		}
		return secret, nil
	}
	return secret, nil
}
42 |
43 | func (r *TypesenseClusterReconciler) createAdminApiKey(
44 | ctx context.Context,
45 | secretObjectKey client.ObjectKey,
46 | ts *tsv1alpha1.TypesenseCluster,
47 | ) (*v1.Secret, error) {
48 | token, err := generateToken()
49 | if err != nil {
50 | return nil, err
51 | }
52 |
53 | secret := &v1.Secret{
54 | ObjectMeta: getObjectMeta(ts, &secretObjectKey.Name, nil),
55 | Type: v1.SecretTypeOpaque,
56 | Immutable: ptr.To[bool](true),
57 | Data: map[string][]byte{
58 | ClusterAdminApiKeySecretKeyName: []byte(token),
59 | },
60 | }
61 |
62 | err = ctrl.SetControllerReference(ts, secret, r.Scheme)
63 | if err != nil {
64 | return nil, err
65 | }
66 |
67 | err = r.Create(ctx, secret)
68 | if err != nil {
69 | return nil, err
70 | }
71 |
72 | return secret, nil
73 | }
74 |
75 | func (r *TypesenseClusterReconciler) getAdminApiKeyObjectKey(ts *tsv1alpha1.TypesenseCluster) client.ObjectKey {
76 | if ts.Spec.AdminApiKey != nil {
77 | return client.ObjectKey{
78 | Namespace: ts.Namespace,
79 | Name: ts.Spec.AdminApiKey.Name,
80 | }
81 | }
82 |
83 | return client.ObjectKey{
84 | Namespace: ts.Namespace,
85 | Name: fmt.Sprintf(ClusterAdminApiKeySecret, ts.Name),
86 | }
87 | }
88 |
--------------------------------------------------------------------------------
/internal/controller/typesensecluster_services.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
7 | v1 "k8s.io/api/core/v1"
8 | apierrors "k8s.io/apimachinery/pkg/api/errors"
9 | "k8s.io/apimachinery/pkg/util/intstr"
10 | ctrl "sigs.k8s.io/controller-runtime"
11 | "sigs.k8s.io/controller-runtime/pkg/client"
12 | )
13 |
14 | func (r *TypesenseClusterReconciler) ReconcileServices(ctx context.Context, ts tsv1alpha1.TypesenseCluster) error {
15 | r.logger.V(debugLevel).Info("reconciling services")
16 |
17 | headlessSvcName := fmt.Sprintf(ClusterHeadlessService, ts.Name)
18 | headlessExists := true
19 | headlessObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: headlessSvcName}
20 |
21 | var headless = &v1.Service{}
22 | if err := r.Get(ctx, headlessObjectKey, headless); err != nil {
23 | if apierrors.IsNotFound(err) {
24 | headlessExists = false
25 | } else {
26 | r.logger.Error(err, fmt.Sprintf("unable to fetch service: %s", headlessSvcName))
27 | return err
28 | }
29 | }
30 |
31 | if !headlessExists {
32 | r.logger.V(debugLevel).Info("creating headless service", "service", headlessObjectKey.Name)
33 |
34 | _, err := r.createHeadlessService(ctx, headlessObjectKey, &ts)
35 | if err != nil {
36 | r.logger.Error(err, "creating headless service failed", "service", headlessObjectKey.Name)
37 | return err
38 | }
39 | } else {
40 | if int32(ts.Spec.ApiPort) != headless.Spec.Ports[0].Port {
41 | r.logger.V(debugLevel).Info("updating headless service", "service", headlessObjectKey.Name)
42 |
43 | err := r.updateHeadlessService(ctx, headless, &ts)
44 | if err != nil {
45 | r.logger.Error(err, "updating headless service failed", "service", headlessObjectKey.Name)
46 | return err
47 | }
48 | }
49 | }
50 |
51 | svcName := fmt.Sprintf(ClusterRestService, ts.Name)
52 | svcExists := true
53 | svcObjectKey := client.ObjectKey{Namespace: ts.Namespace, Name: svcName}
54 |
55 | var svc = &v1.Service{}
56 | if err := r.Get(ctx, svcObjectKey, svc); err != nil {
57 | if apierrors.IsNotFound(err) {
58 | svcExists = false
59 | } else {
60 | r.logger.Error(err, fmt.Sprintf("unable to fetch service: %s", svcName))
61 | return err
62 | }
63 | }
64 |
65 | if !svcExists {
66 | r.logger.V(debugLevel).Info("creating resolver service", "service", svcObjectKey.Name)
67 |
68 | _, err := r.createService(ctx, svcObjectKey, &ts)
69 | if err != nil {
70 | r.logger.Error(err, "creating resolver service failed", "service", svcObjectKey.Name)
71 | return err
72 | }
73 | } else {
74 | // temporary to update healthcheck endpoints. remove in future versions
75 | if len(svc.Spec.Ports) < 2 {
76 | err := r.deleteService(ctx, svc)
77 | if err != nil {
78 | return err
79 | }
80 |
81 | _, err = r.createService(ctx, svcObjectKey, &ts)
82 | if err != nil {
83 | r.logger.Error(err, "creating resolver service failed", "service", svcObjectKey.Name)
84 | return err
85 | }
86 | }
87 |
88 | if int32(ts.Spec.ApiPort) != svc.Spec.Ports[0].Port {
89 | r.logger.V(debugLevel).Info("updating resolver service", "service", svcObjectKey.Name)
90 |
91 | err := r.updateService(ctx, svc, &ts)
92 | if err != nil {
93 | r.logger.Error(err, "updating resolver service failed", "service", svcObjectKey.Name)
94 | return err
95 | }
96 | }
97 | }
98 |
99 | return nil
100 | }
101 |
102 | func (r *TypesenseClusterReconciler) createHeadlessService(ctx context.Context, key client.ObjectKey, ts *tsv1alpha1.TypesenseCluster) (*v1.Service, error) {
103 | svc := &v1.Service{
104 | ObjectMeta: getObjectMeta(ts, &key.Name, nil),
105 | Spec: v1.ServiceSpec{
106 | ClusterIP: v1.ClusterIPNone,
107 | PublishNotReadyAddresses: true,
108 | Selector: getLabels(ts),
109 | Ports: []v1.ServicePort{
110 | {
111 | Name: "http",
112 | Port: int32(ts.Spec.ApiPort),
113 | TargetPort: intstr.IntOrString{IntVal: 8108},
114 | },
115 | },
116 | },
117 | }
118 |
119 | err := ctrl.SetControllerReference(ts, svc, r.Scheme)
120 | if err != nil {
121 | return nil, err
122 | }
123 |
124 | err = r.Create(ctx, svc)
125 | if err != nil {
126 | return nil, err
127 | }
128 |
129 | return svc, nil
130 | }
131 |
132 | func (r *TypesenseClusterReconciler) updateHeadlessService(ctx context.Context, headless *v1.Service, ts *tsv1alpha1.TypesenseCluster) error {
133 | patch := client.MergeFrom(headless.DeepCopy())
134 | headless.Spec.Ports[0].Port = int32(ts.Spec.ApiPort)
135 |
136 | if err := r.Patch(ctx, headless, patch); err != nil {
137 | return err
138 | }
139 |
140 | return nil
141 | }
142 |
// createService creates the resolver ClusterIP service exposing the REST API
// port from the spec plus a fixed healthcheck port (8808). The service is
// owned by the TypesenseCluster so it is garbage-collected with it.
func (r *TypesenseClusterReconciler) createService(ctx context.Context, key client.ObjectKey, ts *tsv1alpha1.TypesenseCluster) (*v1.Service, error) {
	svc := &v1.Service{
		ObjectMeta: getObjectMeta(ts, &key.Name, nil),
		Spec: v1.ServiceSpec{
			Type:     v1.ServiceTypeClusterIP,
			Selector: getLabels(ts),
			Ports: []v1.ServicePort{
				{
					// REST API: service port follows the spec, container port is fixed
					Name:       "http",
					Port:       int32(ts.Spec.ApiPort),
					TargetPort: intstr.IntOrString{IntVal: 8108},
				},
				{
					Name:       "healthcheck",
					Port:       8808,
					TargetPort: intstr.IntOrString{IntVal: 8808},
				},
			},
		},
	}

	err := ctrl.SetControllerReference(ts, svc, r.Scheme)
	if err != nil {
		return nil, err
	}

	err = r.Create(ctx, svc)
	if err != nil {
		return nil, err
	}

	return svc, nil
}
176 |
177 | func (r *TypesenseClusterReconciler) updateService(ctx context.Context, svc *v1.Service, ts *tsv1alpha1.TypesenseCluster) error {
178 | patch := client.MergeFrom(svc.DeepCopy())
179 | svc.Spec.Ports[0].Port = int32(ts.Spec.ApiPort)
180 |
181 | if err := r.Patch(ctx, svc, patch); err != nil {
182 | return err
183 | }
184 |
185 | return nil
186 | }
187 |
188 | func (r *TypesenseClusterReconciler) deleteService(ctx context.Context, svc *v1.Service) error {
189 | err := r.Delete(ctx, svc)
190 | if err != nil {
191 | return err
192 | }
193 |
194 | return nil
195 | }
196 |
--------------------------------------------------------------------------------
/internal/controller/utils.go:
--------------------------------------------------------------------------------
1 | package controller
2 |
3 | import (
4 | "crypto/rand"
5 | "encoding/base64"
6 | "fmt"
7 | tsv1alpha1 "github.com/akyriako/typesense-operator/api/v1alpha1"
8 | corev1 "k8s.io/api/core/v1"
9 | "k8s.io/apimachinery/pkg/api/equality"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | "regexp"
12 | "sort"
13 | )
14 |
const (
	// letters is the alphanumeric alphabet used for generated random strings.
	letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	// debugLevel is the logger verbosity level used for debug messages.
	debugLevel = 1
)
19 |
// generateToken returns a cryptographically random 256-byte token encoded as
// standard base64 (344 characters).
func generateToken() (string, error) {
	raw := make([]byte, 256)
	if _, err := rand.Read(raw); err != nil {
		return "", err
	}

	return base64.StdEncoding.EncodeToString(raw), nil
}
30 |
// generateSecureRandomString returns a cryptographically random alphanumeric
// string of the given length.
//
// Random bytes are mapped onto the alphabet with rejection sampling: a plain
// `byte % 62` would be biased toward the first 256%62 = 8 letters, which is
// undesirable for security-sensitive strings. Bytes >= 248 (the largest
// multiple of 62 that fits in a byte) are discarded and redrawn.
func generateSecureRandomString(length int) (string, error) {
	// same alphabet as the package-level letters constant
	const alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	const maxUnbiased = byte(256 - 256%len(alphabet)) // 248

	result := make([]byte, 0, length)
	buf := make([]byte, length)
	for len(result) < length {
		if _, err := rand.Read(buf); err != nil {
			return "", err
		}
		for _, b := range buf {
			if b >= maxUnbiased {
				continue // reject to avoid modulo bias
			}
			result = append(result, alphabet[int(b)%len(alphabet)])
			if len(result) == length {
				break
			}
		}
	}
	return string(result), nil
}
43 |
44 | func getLabels(ts *tsv1alpha1.TypesenseCluster) map[string]string {
45 | return map[string]string{
46 | "app": fmt.Sprintf(ClusterAppLabel, ts.Name),
47 | }
48 | }
49 |
50 | func getObjectMeta(ts *tsv1alpha1.TypesenseCluster, name *string, annotations map[string]string) metav1.ObjectMeta {
51 | if name == nil {
52 | name = &ts.Name
53 | }
54 |
55 | return metav1.ObjectMeta{
56 | Name: *name,
57 | Namespace: ts.Namespace,
58 | Labels: getLabels(ts),
59 | Annotations: annotations,
60 | }
61 | }
62 |
63 | func getReverseProxyLabels(ts *tsv1alpha1.TypesenseCluster) map[string]string {
64 | return map[string]string{
65 | "app": fmt.Sprintf(ClusterReverseProxyAppLabel, ts.Name),
66 | }
67 | }
68 |
69 | func getReverseProxyObjectMeta(ts *tsv1alpha1.TypesenseCluster, name *string, annotations map[string]string) metav1.ObjectMeta {
70 | if name == nil {
71 | name = &ts.Name
72 | }
73 |
74 | return metav1.ObjectMeta{
75 | Name: *name,
76 | Namespace: ts.Namespace,
77 | Labels: getReverseProxyLabels(ts),
78 | Annotations: annotations,
79 | }
80 | }
81 |
82 | func getPodMonitorLabels(ts *tsv1alpha1.TypesenseCluster) map[string]string {
83 | return map[string]string{
84 | "app": fmt.Sprintf(ClusterMetricsPodMonitorAppLabel, ts.Name),
85 | }
86 | }
87 |
88 | func getPodMonitorObjectMeta(ts *tsv1alpha1.TypesenseCluster, name *string, annotations map[string]string) metav1.ObjectMeta {
89 | if name == nil {
90 | name = &ts.Name
91 | }
92 |
93 | return metav1.ObjectMeta{
94 | Name: *name,
95 | Namespace: ts.Namespace,
96 | Labels: getPodMonitorLabels(ts),
97 | Annotations: annotations,
98 | }
99 | }
100 |
const (
	minDelayPerReplicaFactor = 1
	maxDelayPerReplicaFactor = 3
)

// getDelayPerReplicaFactor maps a replica count to a delay multiplier:
// 0 yields the minimum factor (1), otherwise the count itself capped at 3.
func getDelayPerReplicaFactor(size int) int64 {
	switch {
	case size == 0:
		return minDelayPerReplicaFactor
	case size > maxDelayPerReplicaFactor:
		return maxDelayPerReplicaFactor
	default:
		return int64(size)
	}
}
116 |
// contains reports whether value occurs in values, returning the index of the
// first occurrence, or (-1, false) when absent.
func contains(values []string, value string) (int, bool) {
	for index := range values {
		if values[index] == value {
			return index, true
		}
	}
	return -1, false
}
128 |
129 | func normalizeVolumes(vols []corev1.Volume) []corev1.Volume {
130 | if vols == nil {
131 | vols = []corev1.Volume{}
132 | }
133 |
134 | vcopy := append([]corev1.Volume(nil), vols...)
135 | for i := range vcopy {
136 | if cm := vcopy[i].VolumeSource.ConfigMap; cm != nil {
137 | cm.DefaultMode = nil
138 | }
139 | }
140 |
141 | sort.Slice(vcopy, func(i, j int) bool {
142 | return vcopy[i].Name < vcopy[j].Name
143 | })
144 |
145 | return vcopy
146 | }
147 |
148 | func normalizeVolumeMounts(mounts []corev1.VolumeMount) []corev1.VolumeMount {
149 | if mounts == nil {
150 | mounts = []corev1.VolumeMount{}
151 | }
152 | copyMounts := append([]corev1.VolumeMount(nil), mounts...)
153 | sort.Slice(copyMounts, func(i, j int) bool {
154 | return copyMounts[i].Name < copyMounts[j].Name
155 | })
156 | return copyMounts
157 | }
158 |
// needsSyncVolumes returns true if the desired vols differ from what's in the pod.
// Both sides are normalized (sorted by name, ConfigMap DefaultMode cleared)
// before the semantic deep comparison, so ordering and server-side defaulting
// do not produce false positives.
func needsSyncVolumes(desired, existing []corev1.Volume) bool {
	return !equality.Semantic.DeepEqual(
		normalizeVolumes(desired),
		normalizeVolumes(existing),
	)
}
166 |
// needsSyncMounts returns true if the desired mounts differ from what's in the container.
// Both sides are normalized (sorted by name) before the semantic deep
// comparison, so mount ordering does not produce false positives.
func needsSyncMounts(desired, existing []corev1.VolumeMount) bool {
	return !equality.Semantic.DeepEqual(
		normalizeVolumeMounts(desired),
		normalizeVolumeMounts(existing),
	)
}
174 |
// ip4Prefix matches a dotted-quad IPv4 address (octets 0-255) anchored at the
// start of a string; trailing characters such as ":port" are permitted.
var ip4Prefix = regexp.MustCompile(`^((25[0-5]|2[0-4]\d|[01]?\d?\d)\.){3}(25[0-5]|2[0-4]\d|[01]?\d?\d)`)

// hasIP4Prefix reports whether s begins with an IPv4 address.
func hasIP4Prefix(s string) bool {
	return ip4Prefix.MatchString(s)
}
183 |
--------------------------------------------------------------------------------
/test/e2e/e2e_suite_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package e2e
18 |
19 | import (
20 | "fmt"
21 | "testing"
22 |
23 | . "github.com/onsi/ginkgo/v2"
24 | . "github.com/onsi/gomega"
25 | )
26 |
// TestE2E runs the e2e suite using the Ginkgo runner: it registers Gomega's
// Fail handler so failed assertions abort the spec, then hands control to
// RunSpecs.
func TestE2E(t *testing.T) {
	RegisterFailHandler(Fail)
	_, _ = fmt.Fprintf(GinkgoWriter, "Starting typesense-operator suite\n")
	RunSpecs(t, "e2e suite")
}
33 |
--------------------------------------------------------------------------------
/test/e2e/e2e_test.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package e2e
18 |
19 | import (
20 | "fmt"
21 | "os/exec"
22 | "time"
23 |
24 | . "github.com/onsi/ginkgo/v2"
25 | . "github.com/onsi/gomega"
26 |
27 | "github.com/akyriako/typesense-operator/test/utils"
28 | )
29 |
30 | const namespace = "typesense-operator-system"
31 |
32 | var _ = Describe("controller", Ordered, func() {
33 | BeforeAll(func() {
34 | By("installing prometheus operator")
35 | Expect(utils.InstallPrometheusOperator()).To(Succeed())
36 |
37 | By("installing the cert-manager")
38 | Expect(utils.InstallCertManager()).To(Succeed())
39 |
40 | By("creating manager namespace")
41 | cmd := exec.Command("kubectl", "create", "ns", namespace)
42 | _, _ = utils.Run(cmd)
43 | })
44 |
45 | AfterAll(func() {
46 | By("uninstalling the Prometheus manager bundle")
47 | utils.UninstallPrometheusOperator()
48 |
49 | By("uninstalling the cert-manager bundle")
50 | utils.UninstallCertManager()
51 |
52 | By("removing manager namespace")
53 | cmd := exec.Command("kubectl", "delete", "ns", namespace)
54 | _, _ = utils.Run(cmd)
55 | })
56 |
57 | Context("Operator", func() {
58 | It("should run successfully", func() {
59 | var controllerPodName string
60 | var err error
61 |
62 | // projectimage stores the name of the image used in the example
63 | var projectimage = "example.com/typesense-operator:v0.0.1"
64 |
65 | By("building the manager(Operator) image")
66 | cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage))
67 | _, err = utils.Run(cmd)
68 | ExpectWithOffset(1, err).NotTo(HaveOccurred())
69 |
70 | By("loading the the manager(Operator) image on Kind")
71 | err = utils.LoadImageToKindClusterWithName(projectimage)
72 | ExpectWithOffset(1, err).NotTo(HaveOccurred())
73 |
74 | By("installing CRDs")
75 | cmd = exec.Command("make", "install")
76 | _, err = utils.Run(cmd)
77 | ExpectWithOffset(1, err).NotTo(HaveOccurred())
78 |
79 | By("deploying the controller-manager")
80 | cmd = exec.Command("make", "deploy", fmt.Sprintf("IMG=%s", projectimage))
81 | _, err = utils.Run(cmd)
82 | ExpectWithOffset(1, err).NotTo(HaveOccurred())
83 |
84 | By("validating that the controller-manager pod is running as expected")
85 | verifyControllerUp := func() error {
86 | // Get pod name
87 |
88 | cmd = exec.Command("kubectl", "get",
89 | "pods", "-l", "control-plane=controller-manager",
90 | "-o", "go-template={{ range .items }}"+
91 | "{{ if not .metadata.deletionTimestamp }}"+
92 | "{{ .metadata.name }}"+
93 | "{{ \"\\n\" }}{{ end }}{{ end }}",
94 | "-n", namespace,
95 | )
96 |
97 | podOutput, err := utils.Run(cmd)
98 | ExpectWithOffset(2, err).NotTo(HaveOccurred())
99 | podNames := utils.GetNonEmptyLines(string(podOutput))
100 | if len(podNames) != 1 {
101 | return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames))
102 | }
103 | controllerPodName = podNames[0]
104 | ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager"))
105 |
106 | // Validate pod status
107 | cmd = exec.Command("kubectl", "get",
108 | "pods", controllerPodName, "-o", "jsonpath={.status.phase}",
109 | "-n", namespace,
110 | )
111 | status, err := utils.Run(cmd)
112 | ExpectWithOffset(2, err).NotTo(HaveOccurred())
113 | if string(status) != "Running" {
114 | return fmt.Errorf("controller pod in %s status", status)
115 | }
116 | return nil
117 | }
118 | EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed())
119 |
120 | })
121 | })
122 | })
123 |
--------------------------------------------------------------------------------
/test/utils/utils.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2024.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | package utils
18 |
19 | import (
20 | "fmt"
21 | "os"
22 | "os/exec"
23 | "strings"
24 |
25 | . "github.com/onsi/ginkgo/v2" //nolint:golint,revive
26 | )
27 |
const (
	// pinned version and bundle URL template for the Prometheus Operator
	prometheusOperatorVersion = "v0.72.0"
	prometheusOperatorURL     = "https://github.com/prometheus-operator/prometheus-operator/" +
		"releases/download/%s/bundle.yaml"

	// pinned version and manifest URL template for cert-manager
	certmanagerVersion = "v1.14.4"
	certmanagerURLTmpl = "https://github.com/jetstack/cert-manager/releases/download/%s/cert-manager.yaml"
)
36 |
// warnError writes err to the Ginkgo output stream as a non-fatal warning.
func warnError(err error) {
	_, _ = fmt.Fprintf(GinkgoWriter, "warning: %v\n", err)
}
40 |
41 | // InstallPrometheusOperator installs the prometheus Operator to be used to export the enabled metrics.
42 | func InstallPrometheusOperator() error {
43 | url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
44 | cmd := exec.Command("kubectl", "create", "-f", url)
45 | _, err := Run(cmd)
46 | return err
47 | }
48 |
// Run executes the provided command from the project root directory and
// returns its combined stdout/stderr output, wrapping any failure with the
// full command line and captured output.
//
// NOTE(review): besides setting cmd.Dir this also calls os.Chdir, changing the
// working directory of the whole test process — a global side effect kept from
// the kubebuilder scaffolding. The GetProjectDir error is deliberately ignored
// (falls back to the current directory).
func Run(cmd *exec.Cmd) ([]byte, error) {
	dir, _ := GetProjectDir()
	cmd.Dir = dir

	if err := os.Chdir(cmd.Dir); err != nil {
		_, _ = fmt.Fprintf(GinkgoWriter, "chdir dir: %s\n", err)
	}

	cmd.Env = append(os.Environ(), "GO111MODULE=on")
	command := strings.Join(cmd.Args, " ")
	_, _ = fmt.Fprintf(GinkgoWriter, "running: %s\n", command)
	output, err := cmd.CombinedOutput()
	if err != nil {
		return output, fmt.Errorf("%s failed with error: (%v) %s", command, err, string(output))
	}

	return output, nil
}
68 |
69 | // UninstallPrometheusOperator uninstalls the prometheus
70 | func UninstallPrometheusOperator() {
71 | url := fmt.Sprintf(prometheusOperatorURL, prometheusOperatorVersion)
72 | cmd := exec.Command("kubectl", "delete", "-f", url)
73 | if _, err := Run(cmd); err != nil {
74 | warnError(err)
75 | }
76 | }
77 |
78 | // UninstallCertManager uninstalls the cert manager
79 | func UninstallCertManager() {
80 | url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
81 | cmd := exec.Command("kubectl", "delete", "-f", url)
82 | if _, err := Run(cmd); err != nil {
83 | warnError(err)
84 | }
85 | }
86 |
87 | // InstallCertManager installs the cert manager bundle.
88 | func InstallCertManager() error {
89 | url := fmt.Sprintf(certmanagerURLTmpl, certmanagerVersion)
90 | cmd := exec.Command("kubectl", "apply", "-f", url)
91 | if _, err := Run(cmd); err != nil {
92 | return err
93 | }
94 | // Wait for cert-manager-webhook to be ready, which can take time if cert-manager
95 | // was re-installed after uninstalling on a cluster.
96 | cmd = exec.Command("kubectl", "wait", "deployment.apps/cert-manager-webhook",
97 | "--for", "condition=Available",
98 | "--namespace", "cert-manager",
99 | "--timeout", "5m",
100 | )
101 |
102 | _, err := Run(cmd)
103 | return err
104 | }
105 |
106 | // LoadImageToKindClusterWithName loads a local docker image to the kind cluster
107 | func LoadImageToKindClusterWithName(name string) error {
108 | cluster := "kind"
109 | if v, ok := os.LookupEnv("KIND_CLUSTER"); ok {
110 | cluster = v
111 | }
112 | kindOptions := []string{"load", "docker-image", name, "--name", cluster}
113 | cmd := exec.Command("kind", kindOptions...)
114 | _, err := Run(cmd)
115 | return err
116 | }
117 |
// GetNonEmptyLines splits the given command output on newlines and returns
// only the non-empty lines (nil when there are none).
func GetNonEmptyLines(output string) []string {
	var lines []string
	for _, line := range strings.Split(output, "\n") {
		if line == "" {
			continue
		}
		lines = append(lines, line)
	}

	return lines
}
131 |
132 | // GetProjectDir will return the directory where the project is
133 | func GetProjectDir() (string, error) {
134 | wd, err := os.Getwd()
135 | if err != nil {
136 | return wd, err
137 | }
138 | wd = strings.Replace(wd, "/test/e2e", "", -1)
139 | return wd, nil
140 | }
141 |
--------------------------------------------------------------------------------