├── .codecov.yml ├── .github ├── ISSUE_TEMPLATE.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ └── ci.yaml ├── .gitignore ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── api └── v1beta1 │ ├── deepcopy_test.go │ ├── doc.go │ ├── groupversion_info.go │ ├── status.go │ ├── status_test.go │ ├── v1beta1_suite_test.go │ ├── zookeepercluster_types.go │ ├── zookeepercluster_types_test.go │ └── zz_generated.deepcopy.go ├── build └── Dockerfile ├── charts ├── zookeeper-operator │ ├── Chart.yaml │ ├── README.md │ ├── templates │ │ ├── _helpers.tpl │ │ ├── clusterrole.yaml │ │ ├── clusterrolebinding.yaml │ │ ├── operator.yaml │ │ ├── post-install-upgrade-hooks.yaml │ │ ├── pre-delete-hooks.yaml │ │ ├── role.yaml │ │ ├── rolebinding.yaml │ │ ├── service_account.yaml │ │ └── zookeeper.pravega.io_zookeeperclusters_crd.yaml │ └── values.yaml └── zookeeper │ ├── Chart.yaml │ ├── README.md │ ├── templates │ ├── _helpers.tpl │ ├── post-install-upgrade-hooks.yaml │ └── zookeeper.yaml │ ├── values.yaml │ └── values │ └── minikube.yaml ├── cmd └── exporter │ └── main.go ├── config ├── crd │ ├── bases │ │ └── zookeeper.pravega.io_zookeeperclusters.yaml │ └── kustomization.yaml ├── default │ └── kustomization.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── rbac │ ├── all_ns_rbac.yaml │ ├── default_ns_rbac.yaml │ ├── kustomization.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── samples │ ├── ECS │ │ └── zookeeper_v1beta1_zookeepercluster_cr.yaml │ ├── kustomization.yaml │ └── pravega │ │ └── zookeeper_v1beta1_zookeepercluster_cr.yaml └── test │ └── kustomization.yaml ├── controllers ├── zookeepercluster_controller.go └── zookeepercluster_controller_test.go ├── doc └── operator-upgrade.md ├── docker ├── Dockerfile ├── Dockerfile-swarm ├── bin │ ├── zookeeperFunctions.sh │ ├── zookeeperLive.sh │ ├── zookeeperMetrics.sh │ ├── zookeeperReady.sh │ ├── zookeeperStart.sh │ └── zookeeperTeardown.sh ├── 
zoo.cfg.swarm └── zu │ ├── build.gradle.kts │ ├── gradle │ └── wrapper │ │ ├── gradle-wrapper.jar │ │ └── gradle-wrapper.properties │ ├── gradlew │ └── src │ └── main │ └── java │ └── io │ └── pravega │ └── zookeeper │ ├── Main.kt │ └── Zookeeper.kt ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt ├── main.go ├── pkg ├── controller │ └── config │ │ └── config.go ├── test │ └── e2e │ │ └── e2eutil │ │ ├── spec_util.go │ │ └── zookeepercluster_util.go ├── utils │ ├── finalizer_utils.go │ ├── finalizer_utils_test.go │ ├── leader.go │ ├── leader_test.go │ ├── test_utils.go │ ├── test_utils_test.go │ ├── utils_suite_test.go │ ├── zookeeper_util.go │ └── zookeeper_util_test.go ├── version │ └── version.go ├── yamlexporter │ ├── exporterutil_test.go │ └── exportutil.go └── zk │ ├── generators.go │ ├── generators_test.go │ ├── synchronizers.go │ ├── synchronizers_test.go │ ├── zk_suite_test.go │ ├── zookeeper_client.go │ └── zookeeper_client_test.go ├── scripts ├── check_format.sh └── check_license.sh └── test └── e2e ├── basic_test.go ├── ephemeral_test.go ├── image_pullsecret_test.go ├── multiple_zk_test.go ├── pod_deletion_test.go ├── rolling_restart_test.go ├── scale_test.go ├── suite_test.go └── upgrade_test.go /.codecov.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright Pravega Authors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | codecov: 17 | require_ci_to_pass: yes 18 | max_report_age: off 19 | notify: 20 | wait_for_ci: yes 21 | 22 | coverage: 23 | status: 24 | project: 25 | default: 26 | threshold: 0.5% 27 | patch: 28 | default: 29 | target: 70% 30 | ignore: 31 | - "**/generated/**" 32 | - "test" 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### Description 2 | 3 | _(Describe the feature, bug, question, proposal that you are requesting)_ 4 | 5 | ### Importance 6 | 7 | _(Indicate the importance of this issue to you (blocker, must-have, should-have, nice-to-have))_ 8 | 9 | ### Location 10 | 11 | _(Where is the piece of code, package, or document affected by this issue?)_ 12 | 13 | ### Suggestions for an improvement 14 | 15 | _(How do you suggest to fix or proceed with this issue?)_ 16 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### Change log description 2 | 3 | _(2-3 concise points about the changes in this PR. 
When committing this PR, the committer is expected to copy the content of this section to the merge description box)_ 4 | 5 | ### Purpose of the change 6 | 7 | _(e.g., Fixes #666, Closes #1234)_ 8 | 9 | ### What the code does 10 | 11 | _(Detailed description of the code changes)_ 12 | 13 | ### How to verify it 14 | 15 | _(Steps to verify that the changes are effective)_ 16 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | # workflow name 2 | name: CI 3 | 4 | # on events 5 | on: 6 | push: 7 | branches: 8 | - master 9 | pull_request: 10 | branches: 11 | - master 12 | release: 13 | types: 14 | - created 15 | 16 | # jobs to run 17 | jobs: 18 | build: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Set up Go 1.21 22 | uses: actions/setup-go@v2 23 | with: 24 | go-version: "1.21" 25 | id: go 26 | - name: Set up Go for root 27 | run: | 28 | sudo ln -sf `which go` `sudo which go` || true 29 | sudo go version 30 | - name: Check out code into the Go module directory 31 | uses: actions/checkout@v2 32 | - name: get go version 33 | run: go version 34 | - name: Gofmt and License checks 35 | run: make check 36 | - name: unit tests 37 | run: make test 38 | - name: Codecov 39 | uses: codecov/codecov-action@v1.0.12 40 | - name: Set env 41 | run: | 42 | echo "KUBERNETES_VERSION=v1.23.1" >> $GITHUB_ENV 43 | echo "MINIKUBE_VERSION=v1.25.2" >> $GITHUB_ENV 44 | echo "KUBERNETES_CONFIG_FILE=$HOME/.kube/config" >> $GITHUB_ENV 45 | echo "CHANGE_MINIKUBE_NONE_USER=true" >> $GITHUB_ENV 46 | - name: minikube setup 47 | run: | 48 | curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$KUBERNETES_VERSION/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl /usr/local/bin/ 49 | curl -Lo minikube https://storage.googleapis.com/minikube/releases/$MINIKUBE_VERSION/minikube-linux-amd64 && chmod +x minikube && sudo mv 
minikube /usr/local/bin/ 50 | sudo mount --make-rshared / 51 | sudo apt-get install -y conntrack 52 | CHANGE_MINIKUBE_NONE_USER=true 53 | export KUBERNETES_CONFIG_FILE=$HOME/.kube/config 54 | export KUBERNETES_CONFIG_FILE=$HOME/.kube/config;export CHANGE_MINIKUBE_NONE_USER=true;sudo minikube start --vm-driver=none --bootstrapper=kubeadm --kubernetes-version=$KUBERNETES_VERSION 55 | echo "minikube started, updating context" 56 | export KUBERNETES_CONFIG_FILE=$HOME/.kube/config;export CHANGE_MINIKUBE_NONE_USER=true; sudo minikube update-context 57 | echo "context is updated" 58 | #JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; until kubectl get nodes -o jsonpath="$JSONPATH" 2>&1 | grep -q "Ready=True"; do sleep 1; done 59 | sleep 60 60 | sudo kubectl create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=kube-system:default 61 | echo "created cluster role" 62 | sudo kubectl cluster-info 63 | echo "cluster info" 64 | sudo kubectl -n kube-system get pod -o wide 65 | sudo kubectl create secret docker-registry regcred --docker-server=https://index.docker.io/v1/ --docker-username="testanisha" --docker-password="123456789" --docker-email=testimage456@gmail.com 66 | - name: E2E 67 | run: sudo make test-e2e 68 | publish: 69 | name: Publish docker image 70 | runs-on: ubuntu-latest 71 | needs: build 72 | if: github.event_name == 'release' && github.event.action == 'created' 73 | steps: 74 | - name: Pushing docker images 75 | run: sudo make push 76 | - name: Uploading binary files 77 | uses: actions/upload-artifact@v2 78 | with: 79 | name: zookeeper-exporter 80 | path: bin/zookeeper-exporter* 81 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary Build Files 2 | bin 3 | build/_output 4 | build/_test 5 | # Created by 
https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 6 | # Intellij 7 | .idea 8 | 9 | # Vendored depdendencies 10 | vendor/* 11 | ### Emacs ### 12 | # -*- mode: gitignore; -*- 13 | *~ 14 | \#*\# 15 | /.emacs.desktop 16 | /.emacs.desktop.lock 17 | *.elc 18 | auto-save-list 19 | tramp 20 | .\#* 21 | # Org-mode 22 | .org-id-locations 23 | *_archive 24 | # flymake-mode 25 | *_flymake.* 26 | # eshell files 27 | /eshell/history 28 | /eshell/lastdir 29 | # elpa packages 30 | /elpa/ 31 | # reftex files 32 | *.rel 33 | # AUCTeX auto folder 34 | /auto/ 35 | # cask packages 36 | .cask/ 37 | dist/ 38 | # Flycheck 39 | flycheck_*.el 40 | # server auth directory 41 | /server/ 42 | # projectiles files 43 | .projectile 44 | projectile-bookmarks.eld 45 | # directory configuration 46 | .dir-locals.el 47 | # saveplace 48 | places 49 | # url cache 50 | url/cache/ 51 | # cedet 52 | ede-projects.el 53 | # smex 54 | smex-items 55 | # company-statistics 56 | company-statistics-cache.el 57 | # anaconda-mode 58 | anaconda-mode/ 59 | ### Go ### 60 | # Binaries for programs and plugins 61 | *.exe 62 | *.exe~ 63 | *.dll 64 | *.so 65 | *.dylib 66 | # Test binary, build with 'go test -c' 67 | *.test 68 | # Output of the go coverage tool, specifically when used with LiteIDE 69 | coverage.txt 70 | *.out 71 | ### Vim ### 72 | # swap 73 | .sw[a-p] 74 | .*.sw[a-p] 75 | # session 76 | Session.vim 77 | # temporary 78 | .netrwhist 79 | # auto-generated tag files 80 | tags 81 | ### VisualStudioCode ### 82 | .vscode/* 83 | .history 84 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 85 | *.iml 86 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Zookeeper Operator 2 | 3 | Please check the [Contributing](https://github.com/pravega/zookeeper-operator/wiki/Contributing) wiki page. 4 | 5 | Happy hacking! 
6 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG DOCKER_REGISTRY 2 | ARG DISTROLESS_DOCKER_REGISTRY 3 | ARG ALPINE_VERSION=3.18 4 | FROM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}golang:1.21-alpine${ALPINE_VERSION} as go-builder 5 | 6 | ARG PROJECT_NAME=zookeeper-operator 7 | ARG REPO_PATH=github.com/pravega/$PROJECT_NAME 8 | 9 | # Build version and commit should be passed in when performing docker build 10 | ARG VERSION=0.0.0-localdev 11 | ARG GIT_SHA=0000000 12 | 13 | WORKDIR /src 14 | COPY pkg ./pkg 15 | COPY cmd ./cmd 16 | # Copy the Go Modules manifests 17 | COPY go.mod go.mod 18 | COPY go.sum go.sum 19 | 20 | # Download all dependencies. 21 | RUN go mod download 22 | 23 | # Copy the go source 24 | COPY main.go main.go 25 | COPY api/ api/ 26 | COPY controllers/ controllers/ 27 | 28 | # Build 29 | RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o /src/${PROJECT_NAME} \ 30 | -ldflags "-X ${REPO_PATH}/pkg/version.Version=${VERSION} -X ${REPO_PATH}/pkg/version.GitSHA=${GIT_SHA}" main.go 31 | 32 | FROM ${DISTROLESS_DOCKER_REGISTRY:-gcr.io/}distroless/static-debian11:nonroot AS final 33 | 34 | ARG PROJECT_NAME=zookeeper-operator 35 | 36 | COPY --from=go-builder /src/${PROJECT_NAME} /usr/local/bin/${PROJECT_NAME} 37 | 38 | ENTRYPOINT ["/usr/local/bin/zookeeper-operator"] 39 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | SHELL=/bin/bash -o pipefail 10 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) 11 | CRD_OPTIONS ?= "crd" 12 | 13 | PROJECT_NAME=zookeeper-operator 14 | EXPORTER_NAME=zookeeper-exporter 15 | APP_NAME=zookeeper 16 | REPO=pravega/$(PROJECT_NAME) 17 | TEST_REPO=testzkop/$(PROJECT_NAME) 18 | APP_REPO=pravega/$(APP_NAME) 19 | ALTREPO=emccorp/$(PROJECT_NAME) 20 | APP_ALTREPO=emccorp/$(APP_NAME) 21 | VERSION=$(shell git describe --always --tags --dirty | tr -d "v" | sed "s/\(.*\)-g`git rev-parse --short HEAD`/\1/") 22 | GIT_SHA=$(shell git rev-parse --short HEAD) 23 | TEST_IMAGE=$(TEST_REPO)-testimages:$(VERSION) 24 | DOCKER_TEST_PASS=testzkop@123 25 | DOCKER_TEST_USER=testzkop 26 | .PHONY: all build check clean test 27 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 28 | ifeq (,$(shell go env GOBIN)) 29 | GOBIN=$(shell go env GOPATH)/bin 30 | else 31 | GOBIN=$(shell go env GOBIN) 32 | endif 33 | 34 | # Install CRDs into a cluster 35 | install: manifests kustomize 36 | $(KUSTOMIZE) build config/crd | kubectl apply -f - 37 | 38 | # Uninstall CRDs from a cluster 39 | uninstall: manifests kustomize 40 | $(KUSTOMIZE) build config/crd | kubectl delete -f - 41 | 42 | crds: ## Generate CRDs 43 | - make controller-gen 44 | - $(CONTROLLER_GEN) crd paths=./api/... 
output:dir=./config/crd/bases schemapatch:manifests=./config/crd/bases 45 | 46 | 47 | # Deploy controller in the configured Kubernetes cluster in ~/.kube/config 48 | deploy: manifests kustomize 49 | cd config/manager && $(KUSTOMIZE) edit set image pravega/zookeeper-operator=$(TEST_IMAGE) 50 | $(KUSTOMIZE) build config/default | kubectl apply -f - 51 | 52 | 53 | # Deploy controller in the configured Kubernetes cluster in ~/.kube/config 54 | deploy-test: manifests kustomize 55 | cd config/test 56 | $(KUSTOMIZE) build config/test | kubectl apply -f - 57 | 58 | # Undeploy controller in the configured Kubernetes cluster in ~/.kube/config 59 | undeploy-test: manifests kustomize 60 | cd config/test 61 | $(KUSTOMIZE) build config/test | kubectl delete -f - 62 | 63 | # Undeploy controller in the configured Kubernetes cluster in ~/.kube/config 64 | undeploy: 65 | $(KUSTOMIZE) build config/default | kubectl delete -f - 66 | 67 | # Generate manifests e.g. CRD, RBAC etc. 68 | manifests: controller-gen 69 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 70 | 71 | # Run go fmt against code 72 | fmt: 73 | go fmt ./... 74 | 75 | # Run go vet against code 76 | vet: 77 | go vet ./... 78 | 79 | ## Location to install dependencies to 80 | LOCALBIN ?= $(shell pwd)/bin 81 | $(LOCALBIN): 82 | mkdir -p $(LOCALBIN) 83 | ## Tool Binaries 84 | KUSTOMIZE ?= $(LOCALBIN)/kustomize 85 | CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen 86 | ## Tool Versions 87 | KUSTOMIZE_VERSION ?= v3.5.4 88 | CONTROLLER_TOOLS_VERSION ?= v0.9.0 89 | KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" 90 | .PHONY: kustomize 91 | kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
92 | $(KUSTOMIZE): $(LOCALBIN) 93 | test -s $(LOCALBIN)/kustomize || { curl -s $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); } 94 | .PHONY: controller-gen 95 | controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. 96 | $(CONTROLLER_GEN): $(LOCALBIN) 97 | test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) 98 | 99 | all: generate check build 100 | 101 | generate: 102 | $(CONTROLLER_GEN) object paths="./..." 103 | make manifests 104 | # sync crd generated to helm-chart 105 | echo '{{- if .Values.crd.create }}' > charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml 106 | cat config/crd/bases/zookeeper.pravega.io_zookeeperclusters.yaml >> charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml 107 | echo '{{- end }}' >> charts/zookeeper-operator/templates/zookeeper.pravega.io_zookeeperclusters_crd.yaml 108 | 109 | 110 | build: test build-go build-image 111 | 112 | build-go: 113 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \ 114 | -ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \ 115 | -o bin/$(PROJECT_NAME)-linux-amd64 main.go 116 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \ 117 | -ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \ 118 | -o bin/$(EXPORTER_NAME)-linux-amd64 cmd/exporter/main.go 119 | CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build \ 120 | -ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \ 121 | -o bin/$(PROJECT_NAME)-darwin-amd64 main.go 122 | CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build \ 123 | -ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \ 124 | -o 
bin/$(EXPORTER_NAME)-darwin-amd64 cmd/exporter/main.go 125 | CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build \ 126 | -ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \ 127 | -o bin/$(PROJECT_NAME)-windows-amd64.exe main.go 128 | CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build \ 129 | -ldflags "-X github.com/$(REPO)/pkg/version.Version=$(VERSION) -X github.com/$(REPO)/pkg/version.GitSHA=$(GIT_SHA)" \ 130 | -o bin/$(EXPORTER_NAME)-windows-amd64.exe cmd/exporter/main.go 131 | 132 | build-image: 133 | docker build --build-arg VERSION=$(VERSION) --build-arg DOCKER_REGISTRY=$(DOCKER_REGISTRY) --build-arg DISTROLESS_DOCKER_REGISTRY=$(DISTROLESS_DOCKER_REGISTRY) --build-arg GIT_SHA=$(GIT_SHA) -t $(REPO):$(VERSION) . 134 | docker tag $(REPO):$(VERSION) $(REPO):latest 135 | 136 | build-zk-image: 137 | 138 | docker build --build-arg VERSION=$(VERSION) --build-arg DOCKER_REGISTRY=$(DOCKER_REGISTRY) --build-arg GIT_SHA=$(GIT_SHA) -t $(APP_REPO):$(VERSION) ./docker 139 | docker tag $(APP_REPO):$(VERSION) $(APP_REPO):latest 140 | 141 | build-zk-image-swarm: 142 | docker build --build-arg VERSION=$(VERSION)-swarm --build-arg DOCKER_REGISTRY=$(DOCKER_REGISTRY) --build-arg GIT_SHA=$(GIT_SHA) \ 143 | -f ./docker/Dockerfile-swarm -t $(APP_REPO):$(VERSION)-swarm ./docker 144 | 145 | test: 146 | go test $$(go list ./... | grep -v /vendor/ | grep -v /test/e2e) -race -coverprofile=coverage.txt -covermode=atomic 147 | 148 | test-e2e: test-e2e-remote 149 | 150 | test-e2e-remote: 151 | make test-login 152 | docker build . -t $(TEST_IMAGE) 153 | docker push $(TEST_IMAGE) 154 | make deploy 155 | RUN_LOCAL=false go test -v -timeout 2h ./test/e2e... -args -ginkgo.v 156 | make undeploy 157 | 158 | test-e2e-local: 159 | make deploy-test 160 | RUN_LOCAL=true go test -v -timeout 2h ./test/e2e... 
-args -ginkgo.v 161 | make undeploy-test 162 | 163 | run-local: 164 | go run ./main.go 165 | 166 | login: 167 | @docker login -u "$(DOCKER_USER)" -p "$(DOCKER_PASS)" 168 | 169 | test-login: 170 | echo "$(DOCKER_TEST_PASS)" | docker login -u "$(DOCKER_TEST_USER)" --password-stdin 171 | 172 | push: build-image build-zk-image login 173 | docker push $(REPO):$(VERSION) 174 | docker push $(REPO):latest 175 | docker push $(APP_REPO):$(VERSION) 176 | docker push $(APP_REPO):latest 177 | docker tag $(REPO):$(VERSION) $(ALTREPO):$(VERSION) 178 | docker tag $(REPO):$(VERSION) $(ALTREPO):latest 179 | docker tag $(APP_REPO):$(VERSION) $(APP_ALTREPO):$(VERSION) 180 | docker tag $(APP_REPO):$(VERSION) $(APP_ALTREPO):latest 181 | docker push $(ALTREPO):$(VERSION) 182 | docker push $(ALTREPO):latest 183 | docker push $(APP_ALTREPO):$(VERSION) 184 | docker push $(APP_ALTREPO):latest 185 | 186 | clean: 187 | rm -f bin/$(PROJECT_NAME) 188 | 189 | check: check-format check-license 190 | 191 | check-format: 192 | ./scripts/check_format.sh 193 | 194 | check-license: 195 | ./scripts/check_license.sh 196 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: zookeeper.pravega.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | repo: github.com/pravega/zookeeper-operator 5 | resources: 6 | - group: zookeeper.pravega.io 7 | kind: ZookeeperCluster 8 | version: v1beta1 9 | version: "3" 10 | plugins: 11 | manifests.sdk.operatorframework.io/v2: {} 12 | scorecard.sdk.operatorframework.io/v2: {} 13 | projectName: zookeeper-operator 14 | -------------------------------------------------------------------------------- /api/v1beta1/doc.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | // Package v1beta1 contains API Schema definitions for the zookeeper v1beta1 API 12 | // group 13 | // +k8s:deepcopy-gen=package,register 14 | // +groupName=zookeeper.pravega.io 15 | package v1beta1 16 | -------------------------------------------------------------------------------- /api/v1beta1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | // Package v1beta1 contains API Schema definitions for the zookeeper.pravega.io v1beta1 API group 12 | // +kubebuilder:object:generate=true 13 | // +groupName=zookeeper.pravega.io 14 | package v1beta1 15 | 16 | import ( 17 | "k8s.io/apimachinery/pkg/runtime/schema" 18 | "sigs.k8s.io/controller-runtime/pkg/scheme" 19 | ) 20 | 21 | var ( 22 | // GroupVersion is group version used to register these objects 23 | GroupVersion = schema.GroupVersion{Group: "zookeeper.pravega.io", Version: "v1beta1"} 24 | 25 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 26 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 27 | 28 | // AddToScheme adds the types in this group-version to the given scheme.
29 | AddToScheme = SchemeBuilder.AddToScheme 30 | ) 31 | -------------------------------------------------------------------------------- /api/v1beta1/status.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package v1beta1 12 | 13 | import ( 14 | "time" 15 | 16 | v1 "k8s.io/api/core/v1" 17 | ) 18 | 19 | type ClusterConditionType string 20 | 21 | const ( 22 | ClusterConditionPodsReady ClusterConditionType = "PodsReady" 23 | ClusterConditionUpgrading = "Upgrading" 24 | ClusterConditionError = "Error" 25 | 26 | // Reasons for cluster upgrading condition 27 | UpdatingZookeeperReason = "Updating Zookeeper" 28 | UpgradeErrorReason = "Upgrade Error" 29 | ) 30 | 31 | // ZookeeperClusterStatus defines the observed state of ZookeeperCluster 32 | type ZookeeperClusterStatus struct { 33 | // Members is the zookeeper members in the cluster 34 | Members MembersStatus `json:"members,omitempty"` 35 | 36 | // Replicas is the number of number of desired replicas in the cluster 37 | Replicas int32 `json:"replicas,omitempty"` 38 | 39 | // ReadyReplicas is the number of number of ready replicas in the cluster 40 | ReadyReplicas int32 `json:"readyReplicas,omitempty"` 41 | 42 | // InternalClientEndpoint is the internal client IP and port 43 | InternalClientEndpoint string `json:"internalClientEndpoint,omitempty"` 44 | 45 | // ExternalClientEndpoint is the internal client IP and port 46 | ExternalClientEndpoint string `json:"externalClientEndpoint,omitempty"` 47 | 48 | MetaRootCreated bool `json:"metaRootCreated,omitempty"` 49 | 50 | // CurrentVersion is the current cluster version 51 | CurrentVersion string 
`json:"currentVersion,omitempty"` 52 | 53 | TargetVersion string `json:"targetVersion,omitempty"` 54 | 55 | // Conditions list all the applied conditions 56 | Conditions []ClusterCondition `json:"conditions,omitempty"` 57 | } 58 | 59 | // MembersStatus is the status of the members of the cluster with both 60 | // ready and unready node membership lists 61 | type MembersStatus struct { 62 | //+nullable 63 | Ready []string `json:"ready,omitempty"` 64 | //+nullable 65 | Unready []string `json:"unready,omitempty"` 66 | } 67 | 68 | // ClusterCondition shows the current condition of a Zookeeper cluster. 69 | // Comply with k8s API conventions 70 | type ClusterCondition struct { 71 | // Type of Zookeeper cluster condition. 72 | Type ClusterConditionType `json:"type,omitempty"` 73 | 74 | // Status of the condition, one of True, False, Unknown. 75 | Status v1.ConditionStatus `json:"status,omitempty"` 76 | 77 | // The reason for the condition's last transition. 78 | Reason string `json:"reason,omitempty"` 79 | 80 | // A human readable message indicating details about the transition. 81 | Message string `json:"message,omitempty"` 82 | 83 | // The last time this condition was updated. 84 | LastUpdateTime string `json:"lastUpdateTime,omitempty"` 85 | 86 | // Last time the condition transitioned from one status to another. 
87 | LastTransitionTime string `json:"lastTransitionTime,omitempty"` 88 | } 89 | 90 | func (zs *ZookeeperClusterStatus) Init() { 91 | // Initialise conditions 92 | conditionTypes := []ClusterConditionType{ 93 | ClusterConditionPodsReady, 94 | ClusterConditionUpgrading, 95 | ClusterConditionError, 96 | } 97 | for _, conditionType := range conditionTypes { 98 | if _, condition := zs.GetClusterCondition(conditionType); condition == nil { 99 | c := newClusterCondition(conditionType, v1.ConditionFalse, "", "") 100 | zs.setClusterCondition(*c) 101 | } 102 | } 103 | } 104 | 105 | func newClusterCondition(condType ClusterConditionType, status v1.ConditionStatus, reason, message string) *ClusterCondition { 106 | return &ClusterCondition{ 107 | Type: condType, 108 | Status: status, 109 | Reason: reason, 110 | Message: message, 111 | LastUpdateTime: "", 112 | LastTransitionTime: "", 113 | } 114 | } 115 | 116 | func (zs *ZookeeperClusterStatus) SetPodsReadyConditionTrue() { 117 | c := newClusterCondition(ClusterConditionPodsReady, v1.ConditionTrue, "", "") 118 | zs.setClusterCondition(*c) 119 | } 120 | 121 | func (zs *ZookeeperClusterStatus) SetPodsReadyConditionFalse() { 122 | c := newClusterCondition(ClusterConditionPodsReady, v1.ConditionFalse, "", "") 123 | zs.setClusterCondition(*c) 124 | } 125 | 126 | func (zs *ZookeeperClusterStatus) SetUpgradingConditionTrue(reason, message string) { 127 | c := newClusterCondition(ClusterConditionUpgrading, v1.ConditionTrue, reason, message) 128 | zs.setClusterCondition(*c) 129 | } 130 | 131 | func (zs *ZookeeperClusterStatus) SetUpgradingConditionFalse() { 132 | c := newClusterCondition(ClusterConditionUpgrading, v1.ConditionFalse, "", "") 133 | zs.setClusterCondition(*c) 134 | } 135 | 136 | func (zs *ZookeeperClusterStatus) SetErrorConditionTrue(reason, message string) { 137 | c := newClusterCondition(ClusterConditionError, v1.ConditionTrue, reason, message) 138 | zs.setClusterCondition(*c) 139 | } 140 | 141 | func (zs 
*ZookeeperClusterStatus) SetErrorConditionFalse() { 142 | c := newClusterCondition(ClusterConditionError, v1.ConditionFalse, "", "") 143 | zs.setClusterCondition(*c) 144 | } 145 | 146 | func (zs *ZookeeperClusterStatus) GetClusterCondition(t ClusterConditionType) (int, *ClusterCondition) { 147 | for i, c := range zs.Conditions { 148 | if t == c.Type { 149 | return i, &c 150 | } 151 | } 152 | return -1, nil 153 | } 154 | 155 | func (zs *ZookeeperClusterStatus) setClusterCondition(newCondition ClusterCondition) { 156 | now := time.Now().Format(time.RFC3339) 157 | position, existingCondition := zs.GetClusterCondition(newCondition.Type) 158 | 159 | if existingCondition == nil { 160 | zs.Conditions = append(zs.Conditions, newCondition) 161 | return 162 | } 163 | 164 | if existingCondition.Status != newCondition.Status { 165 | existingCondition.Status = newCondition.Status 166 | existingCondition.LastTransitionTime = now 167 | existingCondition.LastUpdateTime = now 168 | } 169 | 170 | if existingCondition.Reason != newCondition.Reason || existingCondition.Message != newCondition.Message { 171 | existingCondition.Reason = newCondition.Reason 172 | existingCondition.Message = newCondition.Message 173 | existingCondition.LastUpdateTime = now 174 | } 175 | 176 | zs.Conditions[position] = *existingCondition 177 | } 178 | 179 | func (zs *ZookeeperClusterStatus) IsClusterInUpgradeFailedState() bool { 180 | _, errorCondition := zs.GetClusterCondition(ClusterConditionError) 181 | if errorCondition == nil { 182 | return false 183 | } 184 | if errorCondition.Status == v1.ConditionTrue && errorCondition.Reason == "UpgradeFailed" { 185 | return true 186 | } 187 | return false 188 | } 189 | 190 | func (zs *ZookeeperClusterStatus) IsClusterInUpgradingState() bool { 191 | _, upgradeCondition := zs.GetClusterCondition(ClusterConditionUpgrading) 192 | if upgradeCondition == nil { 193 | return false 194 | } 195 | if upgradeCondition.Status == v1.ConditionTrue { 196 | return true 197 | } 
198 | return false 199 | } 200 | 201 | func (zs *ZookeeperClusterStatus) IsClusterInReadyState() bool { 202 | _, readyCondition := zs.GetClusterCondition(ClusterConditionPodsReady) 203 | if readyCondition != nil && readyCondition.Status == v1.ConditionTrue { 204 | return true 205 | } 206 | return false 207 | } 208 | 209 | func (zs *ZookeeperClusterStatus) UpdateProgress(reason, updatedReplicas string) { 210 | if zs.IsClusterInUpgradingState() { 211 | // Set the upgrade condition reason to be UpgradingZookeeperReason, message to be the upgradedReplicas 212 | zs.SetUpgradingConditionTrue(reason, updatedReplicas) 213 | } 214 | } 215 | 216 | func (zs *ZookeeperClusterStatus) GetLastCondition() (lastCondition *ClusterCondition) { 217 | if zs.IsClusterInUpgradingState() { 218 | _, lastCondition := zs.GetClusterCondition(ClusterConditionUpgrading) 219 | return lastCondition 220 | } 221 | // nothing to do if we are not upgrading 222 | return nil 223 | } 224 | -------------------------------------------------------------------------------- /api/v1beta1/status_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package v1beta1_test 12 | 13 | import ( 14 | corev1 "k8s.io/api/core/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | 17 | . "github.com/onsi/ginkgo" 18 | . 
"github.com/onsi/gomega"
	"github.com/pravega/zookeeper-operator/api/v1beta1"
)

var _ = Describe("ZookeeperCluster Status", func() {

	var z v1beta1.ZookeeperCluster

	// With no conditions set at all, the state predicates must report false.
	Context("Checking when zookeepercluster has nil status conditions", func() {
		var isClusterUpgradingState, isClusterInUpgradeFailedState bool
		BeforeEach(func() {
			isClusterUpgradingState = z.Status.IsClusterInUpgradingState()
			isClusterInUpgradeFailedState = z.Status.IsClusterInUpgradeFailedState()
		})
		It("should have set isclusterupgrading to false", func() {
			Ω(isClusterUpgradingState).To(Equal(false))
		})
		It("should have set isClusterInUpgradeFailedState to false", func() {
			Ω(isClusterInUpgradeFailedState).To(Equal(false))
		})
	})

	// Init() must seed all three condition types with a false status.
	Context("checking for default values", func() {
		BeforeEach(func() {
			z.Status.Init()
		})
		It("should contains pods ready condition and it is false status", func() {
			_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionPodsReady)
			Ω(condition.Status).To(Equal(corev1.ConditionFalse))
		})
		It("should contains upgrade ready condition and it is false status", func() {
			_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionUpgrading)
			Ω(condition.Status).To(Equal(corev1.ConditionFalse))
		})
		It("should contains pods ready condition and it is false status", func() {
			_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionError)
			Ω(condition.Status).To(Equal(corev1.ConditionFalse))
		})
	})

	// Fresh cluster object for every spec.
	BeforeEach(func() {
		z = v1beta1.ZookeeperCluster{
			ObjectMeta: metav1.ObjectMeta{
				Name: "default",
			},
		}
	})

	Context("manually set pods ready condition to be true", func() {
		BeforeEach(func() {
			condition := v1beta1.ClusterCondition{
				Type:               v1beta1.ClusterConditionPodsReady,
				Status:             corev1.ConditionTrue,
				Reason:             "",
				Message:            "",
				LastUpdateTime:     "",
				LastTransitionTime: "",
			}
			z.Status.Conditions = append(z.Status.Conditions, condition)
		})

		It("should contains pods ready condition and it is true status", func() {
			_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionPodsReady)
			Ω(condition.Status).To(Equal(corev1.ConditionTrue))
		})
	})

	Context("manually set pods upgrade condition to be true", func() {
		BeforeEach(func() {
			condition := v1beta1.ClusterCondition{
				Type:               v1beta1.ClusterConditionUpgrading,
				Status:             corev1.ConditionTrue,
				Reason:             "",
				Message:            "",
				LastUpdateTime:     "",
				LastTransitionTime: "",
			}
			z.Status.Conditions = append(z.Status.Conditions, condition)
			// UpdateProgress stores the updated-replica count in Message.
			z.Status.UpdateProgress(" ", "3")
		})

		It("should contains pods upgrade condition and it is true status", func() {
			_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionUpgrading)
			Ω(condition.Status).To(Equal(corev1.ConditionTrue))
		})

		It("should set the message to 3", func() {
			_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionUpgrading)
			Ω(condition.Message).To(Equal("3"))
		})
	})
	Context("manually set pods Error condition to be true", func() {
		BeforeEach(func() {
			condition := v1beta1.ClusterCondition{
				Type:               v1beta1.ClusterConditionError,
				Status:             corev1.ConditionTrue,
				Reason:             "",
				Message:            "",
				LastUpdateTime:     "",
				LastTransitionTime: "",
			}
			z.Status.Conditions = append(z.Status.Conditions, condition)
		})

		It("should contains pods upgrade condition and it is true status", func() {
			_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionError)
			Ω(condition.Status).To(Equal(corev1.ConditionTrue))
		})
	})

	Context("set conditions for pods ready", func() {
		Context("set pods ready condition to be true", func() {
			BeforeEach(func() {
				z.Status.SetPodsReadyConditionFalse()
				z.Status.SetPodsReadyConditionTrue()
			})
			It("should have pods ready condition with true status", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionPodsReady)
				Ω(condition.Status).To(Equal(corev1.ConditionTrue))
			})
			It("should have pods ready condition with true status using function", func() {
				Ω(z.Status.IsClusterInReadyState()).To(Equal(true))
			})
		})

		Context("set pod ready condition to be false", func() {
			BeforeEach(func() {
				z.Status.SetPodsReadyConditionTrue()
				z.Status.SetPodsReadyConditionFalse()
			})

			It("should have ready condition with false status", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionPodsReady)
				Ω(condition.Status).To(Equal(corev1.ConditionFalse))
			})

			It("should have ready condition with false status using function", func() {
				Ω(z.Status.IsClusterInReadyState()).To(Equal(false))
			})

			It("should have updated timestamps", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionPodsReady)
				//check the timestamps
				Ω(condition.LastUpdateTime).NotTo(Equal(""))
				Ω(condition.LastTransitionTime).NotTo(Equal(""))
			})
		})
	})

	Context("set conditions for upgrade", func() {
		Context("set pods upgrade condition to be true", func() {
			BeforeEach(func() {
				z.Status.SetUpgradingConditionFalse()
				z.Status.SetUpgradingConditionTrue(" ", " ")
			})
			It("should have pods upgrade condition with true status", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionUpgrading)
				Ω(condition.Status).To(Equal(corev1.ConditionTrue))
			})
			It("should have pods upgrade condition with true status using function", func() {
				Ω(z.Status.IsClusterInUpgradingState()).To(Equal(true))
			})
			It("Checking GetlastCondition function and It should return UpgradeCondition as cluster in Upgrading state", func() {
				condition := z.Status.GetLastCondition()
				// NOTE(review): gomega's Equal is type-sensitive (reflect.DeepEqual);
				// comparing string(condition.Type) against ClusterConditionUpgrading
				// only passes if that constant is string-typed — confirm its declaration.
				Ω(string(condition.Type)).To(Equal(v1beta1.ClusterConditionUpgrading))
			})
		})

		Context("set pod upgrade condition to be false", func() {
			BeforeEach(func() {
				z.Status.SetUpgradingConditionTrue(" ", " ")
				z.Status.SetUpgradingConditionFalse()
			})

			It("should have upgrade condition with false status", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionUpgrading)
				Ω(condition.Status).To(Equal(corev1.ConditionFalse))
			})

			It("should have upgrade condition with false status using function", func() {
				Ω(z.Status.IsClusterInUpgradingState()).To(Equal(false))
			})

			It("Checking GetlastCondition function and It should return nil as not in Upgrading state", func() {
				condition := z.Status.GetLastCondition()
				Ω(condition).To(BeNil())
			})

			It("should have updated timestamps", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionUpgrading)
				//check the timestamps
				Ω(condition.LastUpdateTime).NotTo(Equal(""))
				Ω(condition.LastTransitionTime).NotTo(Equal(""))
			})
		})
	})

	Context("set conditions for Error", func() {
		Context("set pods Error condition to be true", func() {
			BeforeEach(func() {
				z.Status.SetErrorConditionFalse()
				z.Status.SetErrorConditionTrue("UpgradeFailed", " ")
			})
			It("should have pods Error condition with true status using function", func() {
				Ω(z.Status.IsClusterInUpgradeFailedState()).To(Equal(true))
			})
			It("should have pods Error condition with true status", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionError)
				Ω(condition.Status).To(Equal(corev1.ConditionTrue))

			})
		})

		Context("set pod Error condition to be false", func() {
			BeforeEach(func() {
				z.Status.SetErrorConditionTrue("UpgradeFailed", " ")
				z.Status.SetErrorConditionFalse()
			})

			It("should have Error condition with false status", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionError)
				Ω(condition.Status).To(Equal(corev1.ConditionFalse))
			})

			It("should have Error condition with false status using function", func() {
				Ω(z.Status.IsClusterInUpgradeFailedState()).To(Equal(false))
			})

			It("should have updated timestamps", func() {
				_, condition := z.Status.GetClusterCondition(v1beta1.ClusterConditionError)
				//check the timestamps
				Ω(condition.LastUpdateTime).NotTo(Equal(""))
				Ω(condition.LastTransitionTime).NotTo(Equal(""))
			})
		})
	})
})
--------------------------------------------------------------------------------
/api/v1beta1/v1beta1_suite_test.go:
--------------------------------------------------------------------------------
/**
 * Copyright (c) 2021 Dell Inc., or its subsidiaries. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 */

package v1beta1

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. 
"github.com/onsi/gomega"
)

// TestZookeeperAPIs wires Ginkgo into the standard `go test` runner and
// executes the ZookeeperCluster API spec suite.
func TestZookeeperAPIs(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "ZookeeperCluster API Tests")
}
--------------------------------------------------------------------------------
/build/Dockerfile:
--------------------------------------------------------------------------------
FROM alpine:3.8

RUN apk upgrade --update --no-cache

USER nobody

ADD build/_output/bin/zookeeper-operator /usr/local/bin/zookeeper-operator
--------------------------------------------------------------------------------
/charts/zookeeper-operator/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: zookeeper-operator
description: Zookeeper Operator Helm chart for Kubernetes
version: 0.2.15
appVersion: 0.2.15
keywords:
  - zookeeper
  - storage
home: https://github.com/pravega/zookeeper-operator
icon: https://zookeeper.apache.org/images/zookeeper_small.gif
--------------------------------------------------------------------------------
/charts/zookeeper-operator/README.md:
--------------------------------------------------------------------------------
# Zookeeper Operator Helm Chart

Installs [Zookeeper Operator](https://github.com/pravega/zookeeper-operator) to create/configure/manage Zookeeper clusters atop Kubernetes.

## Introduction

This chart bootstraps a [Zookeeper Operator](https://github.com/pravega/zookeeper-operator) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. 
8 | 9 | ## Prerequisites 10 | - Kubernetes 1.15+ with Beta APIs 11 | - Helm 3.2.1+ 12 | 13 | ## Installing the Chart 14 | 15 | To install the zookeeper-operator chart, use the following commands: 16 | 17 | ``` 18 | $ helm repo add pravega https://charts.pravega.io 19 | $ helm repo update 20 | $ helm install [RELEASE_NAME] pravega/zookeeper-operator --version=[VERSION] 21 | ``` 22 | - **[RELEASE_NAME]** is the release name for the zookeeper-operator chart. 23 | - **[DEPLOYMENT_NAME]** is the name of the zookeeper-operator deployment so created. (If [RELEASE_NAME] contains the string `zookeeper-operator`, `[DEPLOYMENT_NAME] = [RELEASE_NAME]`, else `[DEPLOYMENT_NAME] = [RELEASE_NAME]-zookeeper-operator`. The [DEPLOYMENT_NAME] can however be overridden by providing `--set fullnameOverride=[DEPLOYMENT_NAME]` along with the helm install command) 24 | - **[VERSION]** can be any stable release version for zookeeper-operator from 0.2.8 onwards. 25 | 26 | This command deploys a zookeeper-operator on the Kubernetes cluster in its default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 27 | 28 | ## Uninstalling the Chart 29 | 30 | To uninstall/delete the zookeeper-operator chart, use the following command: 31 | 32 | ``` 33 | $ helm uninstall [RELEASE_NAME] 34 | ``` 35 | 36 | The command removes all the Kubernetes components associated with the chart and deletes the release. 37 | 38 | ## Configuration 39 | 40 | The following table lists the configurable parameters of the zookeeper-operator chart and their default values. 

| Parameter | Description | Default |
| ----- | ----------- | ------ |
| `additionalEnv` | Additional Environment Variables | `[]` |
| `additionalSidecars` | Additional Sidecars Configuration | `[]` |
| `additionalVolumes` | Additional volumes required for sidecars | `[]` |
| `affinity` | Specifies scheduling constraints on pods | `{}` |
| `annotations` | Operator pod annotations | `{}` |
| `crd.create` | Create zookeeper CRD | `true` |
| `disableFinalizer` | Disable finalizer for zookeeper clusters, PVCs clean-up will be skipped.| `false` |
| `global.imagePullSecrets` | Lists of secrets to use to pull zookeeper-operator image from a private registry | `[]` |
| `hooks.backoffLimit` | backoffLimit for batch jobs | `10` |
| `hooks.delete` | Create pre-delete hook which ensures that the operator cannot be deleted until the zookeeper cluster custom resources have been cleaned up | `true` |
| `hooks.image.repository` | Image repository for batch jobs | `"lachlanevenson/k8s-kubectl"` |
| `hooks.image.tag` | Image tag for batch jobs | `"v1.23.2"` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.repository` | Image repository | `pravega/zookeeper-operator` |
| `image.tag` | Image tag | `0.2.15` |
| `labels` | Operator pod labels | `{}` |
| `nodeSelector` | Map of key-value pairs to be present as labels in the node in which the pod should run | `{}` |
| `rbac.create` | Create RBAC resources | `true` |
| `resources` | Specifies resource requirements for the container | `{}` |
| `serviceAccount.create` | Create service account | `true` |
| `serviceAccount.name` | Name for the service account | `zookeeper-operator` |
| `tolerations` | Specifies the pod's tolerations | `[]` |
| `watchNamespace` | Namespaces to be watched | `""` |
--------------------------------------------------------------------------------
/charts/zookeeper-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "zookeeper-operator.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "zookeeper-operator.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Common labels 29 | */}} 30 | {{- define "zookeeper-operator.commonLabels" -}} 31 | app.kubernetes.io/name: {{ include "zookeeper-operator.name" . 
}} 32 | {{- if .Chart.AppVersion }} 33 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 34 | {{- end }} 35 | app.kubernetes.io/managed-by: {{ .Release.Service }} 36 | helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" 37 | {{- end -}} 38 | 39 | {{/* 40 | Default sidecar template 41 | */}} 42 | {{- define "chart.additionalSidecars"}} 43 | {{ toYaml .Values.additionalSidecars }} 44 | {{- end}} 45 | 46 | {{/* 47 | Default volume template 48 | */}} 49 | {{- define "chart.additionalVolumes"}} 50 | {{ toYaml .Values.additionalVolumes }} 51 | {{- end}} 52 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "zookeeper-operator.fullname" . }} 6 | labels: 7 | {{ include "zookeeper-operator.commonLabels" . 
| indent 4 }} 8 | rules: 9 | - apiGroups: 10 | - zookeeper.pravega.io 11 | resources: 12 | - "*" 13 | verbs: 14 | - "*" 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - nodes 19 | - pods 20 | - services 21 | - endpoints 22 | - persistentvolumeclaims 23 | - events 24 | - configmaps 25 | - secrets 26 | - serviceaccounts 27 | verbs: 28 | - "*" 29 | - apiGroups: 30 | - apps 31 | resources: 32 | - deployments 33 | - daemonsets 34 | - replicasets 35 | - statefulsets 36 | verbs: 37 | - "*" 38 | - apiGroups: 39 | - policy 40 | resources: 41 | - poddisruptionbudgets 42 | verbs: 43 | - "*" 44 | {{- end }} 45 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: ClusterRoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "zookeeper-operator.fullname" . }} 6 | labels: 7 | {{ include "zookeeper-operator.commonLabels" . | indent 4 }} 8 | subjects: 9 | - kind: ServiceAccount 10 | name: {{ .Values.serviceAccount.name }} 11 | namespace: {{ .Release.Namespace }} 12 | roleRef: 13 | kind: ClusterRole 14 | name: {{ template "zookeeper-operator.fullname" . }} 15 | apiGroup: rbac.authorization.k8s.io 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ template "zookeeper-operator.fullname" . }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{ include "zookeeper-operator.commonLabels" . | indent 4 }} 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: {{ template "zookeeper-operator.fullname" . 
}} 13 | template: 14 | metadata: 15 | labels: 16 | name: {{ template "zookeeper-operator.fullname" . }} 17 | component: zookeeper-operator 18 | {{- if .Values.labels }} 19 | {{ toYaml .Values.labels | indent 8 }} 20 | {{- end }} 21 | {{- if .Values.annotations }} 22 | annotations: 23 | {{ toYaml .Values.annotations | indent 8 }} 24 | {{- end }} 25 | spec: 26 | serviceAccountName: {{ .Values.serviceAccount.name }} 27 | {{- if .Values.additionalVolumes }} 28 | volumes: 29 | {{- include "chart.additionalVolumes" . | indent 6 }} 30 | {{- end }} 31 | containers: 32 | - name: {{ template "zookeeper-operator.fullname" . }} 33 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" 34 | imagePullPolicy: {{ .Values.image.pullPolicy }} 35 | ports: 36 | - containerPort: {{ int .Values.metricsPort }} 37 | name: metrics 38 | command: 39 | - zookeeper-operator 40 | args: 41 | - -metrics-bind-address={{ .Values.metricsBindAddress }}:{{ int .Values.metricsPort }} 42 | {{- if .Values.disableFinalizer }} 43 | - -disableFinalizer 44 | {{- end }} 45 | env: 46 | - name: WATCH_NAMESPACE 47 | value: "{{ .Values.watchNamespace }}" 48 | - name: POD_NAME 49 | valueFrom: 50 | fieldRef: 51 | fieldPath: metadata.name 52 | - name: OPERATOR_NAME 53 | value: {{ template "zookeeper-operator.fullname" . }} 54 | {{- if .Values.additionalEnv }} 55 | {{ toYaml .Values.additionalEnv | indent 8 }} 56 | {{- end }} 57 | {{- if .Values.resources }} 58 | resources: 59 | {{ toYaml .Values.resources | indent 10 }} 60 | {{- end }} 61 | {{- if .Values.additionalSidecars }} 62 | {{- include "chart.additionalSidecars" . 
| indent 6 }} 63 | {{- end }} 64 | {{- if .Values.nodeSelector }} 65 | nodeSelector: 66 | {{ toYaml .Values.nodeSelector | indent 8 }} 67 | {{- end }} 68 | {{- if .Values.affinity }} 69 | affinity: 70 | {{ toYaml .Values.affinity | indent 8 }} 71 | {{- end }} 72 | {{- if .Values.tolerations }} 73 | tolerations: 74 | {{ toYaml .Values.tolerations | indent 8 }} 75 | {{- end }} 76 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/post-install-upgrade-hooks.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 5 | namespace: {{ .Release.Namespace }} 6 | annotations: 7 | "helm.sh/hook": post-install, post-upgrade 8 | "helm.sh/hook-weight": "1" 9 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 10 | rules: 11 | - apiGroups: 12 | - zookeeper.pravega.io 13 | resources: 14 | - "*" 15 | verbs: 16 | - get 17 | - apiGroups: 18 | - extensions 19 | resources: 20 | - "deployments" 21 | verbs: 22 | - get 23 | --- 24 | 25 | kind: RoleBinding 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | metadata: 28 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 29 | namespace: {{ .Release.Namespace }} 30 | annotations: 31 | "helm.sh/hook": post-install, post-upgrade 32 | "helm.sh/hook-weight": "1" 33 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 34 | subjects: 35 | - kind: ServiceAccount 36 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 37 | namespace: {{.Release.Namespace}} 38 | roleRef: 39 | kind: Role 40 | name: {{ template "zookeeper-operator.fullname" . 
}}-post-install-upgrade 41 | apiGroup: rbac.authorization.k8s.io 42 | 43 | --- 44 | 45 | apiVersion: v1 46 | kind: ServiceAccount 47 | metadata: 48 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 49 | namespace: {{ .Release.Namespace }} 50 | annotations: 51 | "helm.sh/hook": post-install, post-upgrade 52 | "helm.sh/hook-weight": "1" 53 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 54 | {{- if or .Values.global.imagePullSecrets .Values.hooks.serviceAccount.imagePullSecrets }} 55 | imagePullSecrets: 56 | {{- range (default .Values.global.imagePullSecrets .Values.hooks.serviceAccount.imagePullSecrets) }} 57 | - name: {{ . }} 58 | {{- end }} 59 | {{- end }} 60 | 61 | --- 62 | 63 | apiVersion: v1 64 | kind: ConfigMap 65 | metadata: 66 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 67 | namespace: {{ .Release.Namespace }} 68 | annotations: 69 | "helm.sh/hook": post-install, post-upgrade 70 | "helm.sh/hook-weight": "1" 71 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 72 | data: 73 | validations.sh: | 74 | #!/bin/sh 75 | set -e 76 | sleep 30 77 | 78 | if [ -z "$(kubectl api-resources | grep ZookeeperCluster)" ]; then 79 | exit 1 80 | fi 81 | --- 82 | 83 | apiVersion: batch/v1 84 | kind: Job 85 | metadata: 86 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 87 | namespace: {{ .Release.Namespace }} 88 | annotations: 89 | "helm.sh/hook": post-install, post-upgrade 90 | "helm.sh/hook-weight": "2" 91 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 92 | spec: 93 | backoffLimit: {{ .Values.hooks.backoffLimit }} 94 | template: 95 | metadata: 96 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 97 | spec: 98 | serviceAccountName: {{ template "zookeeper-operator.fullname" . 
}}-post-install-upgrade 99 | restartPolicy: Never 100 | containers: 101 | - name: post-install-upgrade-job 102 | image: "{{ .Values.hooks.image.repository }}:{{ .Values.hooks.image.tag }}" 103 | {{- if .Values.hooks.securityContext }} 104 | securityContext: 105 | {{ toYaml .Values.hooks.securityContext | indent 10 }} 106 | {{- end }} 107 | command: 108 | - /scripts/validations.sh 109 | volumeMounts: 110 | - name: sh 111 | mountPath: /scripts 112 | readOnly: true 113 | volumes: 114 | - name: sh 115 | configMap: 116 | name: {{ template "zookeeper-operator.fullname" . }}-post-install-upgrade 117 | defaultMode: 0555 118 | {{- if .Values.nodeSelector }} 119 | nodeSelector: 120 | {{ toYaml .Values.nodeSelector | indent 8 }} 121 | {{- end }} 122 | {{- if .Values.affinity }} 123 | affinity: 124 | {{ toYaml .Values.affinity | indent 8 }} 125 | {{- end }} 126 | {{- if .Values.tolerations }} 127 | tolerations: 128 | {{ toYaml .Values.tolerations | indent 8 }} 129 | {{- end }} 130 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/pre-delete-hooks.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.hooks.delete }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "zookeeper-operator.fullname" . }}-pre-delete 6 | annotations: 7 | "helm.sh/hook": pre-delete 8 | "helm.sh/hook-weight": "1" 9 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 10 | rules: 11 | - apiGroups: 12 | - zookeeper.pravega.io 13 | resources: 14 | - "*" 15 | verbs: 16 | - get 17 | - list 18 | --- 19 | 20 | kind: ClusterRoleBinding 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | metadata: 23 | name: {{ template "zookeeper-operator.fullname" . 
}}-pre-delete 24 | annotations: 25 | "helm.sh/hook": pre-delete 26 | "helm.sh/hook-weight": "1" 27 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 28 | subjects: 29 | - kind: ServiceAccount 30 | name: {{ template "zookeeper-operator.fullname" . }}-pre-delete 31 | namespace: {{ .Release.Namespace }} 32 | roleRef: 33 | kind: ClusterRole 34 | name: {{ template "zookeeper-operator.fullname" . }}-pre-delete 35 | apiGroup: rbac.authorization.k8s.io 36 | 37 | --- 38 | 39 | apiVersion: v1 40 | kind: ServiceAccount 41 | metadata: 42 | name: {{ template "zookeeper-operator.fullname" . }}-pre-delete 43 | namespace: {{ .Release.Namespace }} 44 | annotations: 45 | "helm.sh/hook": pre-delete 46 | "helm.sh/hook-weight": "1" 47 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 48 | {{- if or .Values.global.imagePullSecrets .Values.hooks.serviceAccount.imagePullSecrets }} 49 | imagePullSecrets: 50 | {{- range (default .Values.global.imagePullSecrets .Values.hooks.serviceAccount.imagePullSecrets) }} 51 | - name: {{ . }} 52 | {{- end }} 53 | {{- end }} 54 | 55 | --- 56 | 57 | apiVersion: v1 58 | kind: ConfigMap 59 | metadata: 60 | name: {{ template "zookeeper-operator.fullname" . 
}}-pre-delete 61 | namespace: {{ .Release.Namespace }} 62 | annotations: 63 | "helm.sh/hook": pre-delete 64 | "helm.sh/hook-weight": "1" 65 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 66 | data: 67 | pre-delete.sh: | 68 | #!/bin/sh 69 | exit_code=0 70 | echo "Checking for ZookeeperCluster Resource" 71 | 72 | ret=$(kubectl get zookeepercluster --all-namespaces --no-headers 2>&1) 73 | if (echo $ret | grep -e "No resources found" -e "the server doesn't have a resource type \"zookeepercluster\"" > /dev/null); 74 | then 75 | echo "None" 76 | else 77 | echo "$ret" 78 | exit_code=1 79 | fi 80 | 81 | if [ $exit_code -ne 0 ]; 82 | then 83 | echo "Pre-delete Check Failed" 84 | exit $exit_code 85 | fi 86 | echo "Pre-delete Check OK" 87 | --- 88 | 89 | apiVersion: batch/v1 90 | kind: Job 91 | metadata: 92 | name: {{ template "zookeeper-operator.fullname" . }}-pre-delete 93 | namespace: {{ .Release.Namespace }} 94 | annotations: 95 | "helm.sh/hook": pre-delete 96 | "helm.sh/hook-weight": "2" 97 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 98 | spec: 99 | backoffLimit: 6 100 | template: 101 | metadata: 102 | name: {{ template "zookeeper-operator.fullname" . }}-pre-delete 103 | spec: 104 | serviceAccountName: {{ template "zookeeper-operator.fullname" . }}-pre-delete 105 | restartPolicy: Never 106 | containers: 107 | - name: pre-delete-job 108 | image: "{{ .Values.hooks.image.repository }}:{{ .Values.hooks.image.tag }}" 109 | {{- if .Values.hooks.securityContext }} 110 | securityContext: 111 | {{ toYaml .Values.hooks.securityContext | indent 12 }} 112 | {{- end }} 113 | command: 114 | - /scripts/pre-delete.sh 115 | volumeMounts: 116 | - name: sh 117 | mountPath: /scripts 118 | readOnly: true 119 | volumes: 120 | - name: sh 121 | configMap: 122 | name: {{ template "zookeeper-operator.fullname" . 
}}-pre-delete 123 | defaultMode: 0555 124 | {{- if .Values.nodeSelector }} 125 | nodeSelector: 126 | {{ toYaml .Values.nodeSelector | indent 8 }} 127 | {{- end }} 128 | {{- if .Values.affinity }} 129 | affinity: 130 | {{ toYaml .Values.affinity | indent 8 }} 131 | {{- end }} 132 | {{- if .Values.tolerations }} 133 | tolerations: 134 | {{ toYaml .Values.tolerations | indent 8 }} 135 | {{- end }} 136 | {{- end }} 137 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/role.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: Role 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "zookeeper-operator.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{ include "zookeeper-operator.commonLabels" . | indent 4 }} 9 | rules: 10 | - apiGroups: 11 | - zookeeper.pravega.io 12 | resources: 13 | - "*" 14 | verbs: 15 | - "*" 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - pods 20 | - services 21 | - endpoints 22 | - persistentvolumeclaims 23 | - events 24 | - configmaps 25 | - secrets 26 | verbs: 27 | - "*" 28 | - apiGroups: 29 | - apps 30 | resources: 31 | - deployments 32 | - daemonsets 33 | - replicasets 34 | - statefulsets 35 | verbs: 36 | - "*" 37 | - apiGroups: 38 | - policy 39 | resources: 40 | - poddisruptionbudgets 41 | verbs: 42 | - "*" 43 | {{- end }} 44 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | kind: RoleBinding 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "zookeeper-operator.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{ include "zookeeper-operator.commonLabels" . 
| indent 4 }} 9 | subjects: 10 | - kind: ServiceAccount 11 | name: {{ .Values.serviceAccount.name }} 12 | roleRef: 13 | kind: Role 14 | name: {{ template "zookeeper-operator.fullname" . }} 15 | apiGroup: rbac.authorization.k8s.io 16 | {{- end }} 17 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/templates/service_account.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | {{- if or .Values.global.imagePullSecrets .Values.serviceAccount.imagePullSecrets }} 5 | imagePullSecrets: 6 | {{- range (default .Values.global.imagePullSecrets .Values.serviceAccount.imagePullSecrets) }} 7 | - name: {{ . }} 8 | {{- end }} 9 | {{- end }} 10 | metadata: 11 | name: {{ .Values.serviceAccount.name }} 12 | namespace: {{ .Release.Namespace }} 13 | labels: 14 | {{ include "zookeeper-operator.commonLabels" . | indent 4 }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /charts/zookeeper-operator/values.yaml: -------------------------------------------------------------------------------- 1 | ## Default values for zookeeper-operator. 2 | ## This is a YAML-formatted file. 3 | ## Declare variables to be passed into your templates. 4 | 5 | global: 6 | # Lists the secrets you need to use to pull zookeeper-operator image from a private registry. 7 | imagePullSecrets: [] 8 | # - private-registry-key 9 | 10 | image: 11 | repository: pravega/zookeeper-operator 12 | tag: 0.2.15 13 | pullPolicy: IfNotPresent 14 | 15 | securityContext: {} 16 | # runAsUser: 1001 17 | # runAsGroup: 1001 18 | 19 | ## Additional labels to be added to resources 20 | labels: {} 21 | 22 | ## Install RBAC roles and bindings. 23 | rbac: 24 | create: true 25 | 26 | ## Service account name and whether to create it. 
27 | serviceAccount: 28 | create: true 29 | name: zookeeper-operator 30 | ## Optionally specify an array of imagePullSecrets. Will override the global parameter if set 31 | # imagePullSecrets: 32 | 33 | ## Whether to create the CRD. 34 | crd: 35 | create: true 36 | 37 | ## Specifies which namespace(s) the Operator should watch over. 38 | ## Default: An empty string means all namespaces. 39 | ## Multiple namespaces can be configured using a comma separated list of namespaces 40 | watchNamespace: "" 41 | 42 | ## Operator pod resources 43 | resources: {} 44 | # limits: 45 | # cpu: 2 46 | # memory: 256Mi 47 | # requests: 48 | # cpu: 1 49 | # memory: 128Mi 50 | 51 | # Scheduling constraints 52 | nodeSelector: {} 53 | affinity: {} 54 | tolerations: [] 55 | 56 | # Pod annotations 57 | annotations: {} 58 | 59 | hooks: 60 | ## Optionally specify an array of imagePullSecrets. Will override the global parameter if set 61 | serviceAccount: 62 | imagePullSecrets: [] 63 | 64 | backoffLimit: 10 65 | image: 66 | repository: lachlanevenson/k8s-kubectl 67 | tag: v1.23.2 68 | ## Whether to create pre-delete hook which ensures that 69 | ## the operator cannot be deleted till the zookeeper cluster 70 | ## custom resources have been cleaned up 71 | delete: true 72 | securityContext: {} 73 | # runAsUser: 1001 74 | # runAsGroup: 1001 75 | 76 | 77 | ## Additional Sidecars Configuration. 78 | additionalSidecars: {} 79 | # - name: nginx 80 | # image: nginx:latest 81 | 82 | ## Additional Environment Variables. 83 | additionalEnv: {} 84 | 85 | ## Additional volumes required for sidecars. 86 | additionalVolumes: {} 87 | # - name: volume1 88 | # emptyDir: {} 89 | # - name: volume2 90 | # emptyDir: {} 91 | 92 | disableFinalizer: false 93 | 94 | ## In order to enable gathering metrics by Prometheus etc... 
bind to 0.0.0.0 95 | metricsBindAddress: 127.0.0.1 96 | metricsPort: "6000" 97 | -------------------------------------------------------------------------------- /charts/zookeeper/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: zookeeper 3 | description: Zookeeper Helm chart for Kubernetes 4 | version: 0.2.15 5 | appVersion: 0.2.15 6 | keywords: 7 | - zookeeper 8 | - storage 9 | home: https://github.com/apache/zookeeper 10 | icon: https://zookeeper.apache.org/images/zookeeper_small.gif 11 | -------------------------------------------------------------------------------- /charts/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | # Zookeeper Helm Chart 2 | 3 | Installs Zookeeper clusters atop Kubernetes. 4 | 5 | ## Introduction 6 | 7 | This chart creates a Zookeeper cluster in [Kubernetes](http://kubernetes.io) using the [Helm](https://helm.sh) package manager. The chart can be installed multiple times to create Zookeeper cluster in multiple namespaces. 8 | 9 | ## Prerequisites 10 | 11 | - Kubernetes 1.15+ with Beta APIs 12 | - Helm 3.2.1+ 13 | - Zookeeper Operator. You can install it using its own [Helm chart](https://github.com/pravega/zookeeper-operator/tree/master/charts/zookeeper-operator) 14 | 15 | ## Installing the Chart 16 | 17 | To install the zookeeper chart, use the following commands: 18 | 19 | ``` 20 | $ helm repo add pravega https://charts.pravega.io 21 | $ helm repo update 22 | $ helm install [RELEASE_NAME] pravega/zookeeper --version=[VERSION] 23 | ``` 24 | where: 25 | - **[RELEASE_NAME]** is the release name for the zookeeper chart. 26 | - **[CLUSTER_NAME]** is the name of the zookeeper cluster so created. (If [RELEASE_NAME] contains the string `zookeeper`, `[CLUSTER_NAME] = [RELEASE_NAME]`, else `[CLUSTER_NAME] = [RELEASE_NAME]-zookeeper`. 
The [CLUSTER_NAME] can however be overridden by providing `--set fullnameOverride=[CLUSTER_NAME]` along with the helm install command) 27 | - **[VERSION]** can be any stable release version for zookeeper from 0.2.8 onwards. 28 | 29 | This command deploys zookeeper on the Kubernetes cluster in its default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. 30 | 31 | ## Upgrading the Chart 32 | 33 | To upgrade the zookeeper chart from version **[OLD_VERSION]** to version **[NEW_VERSION]**, use the following command: 34 | 35 | ``` 36 | $ helm upgrade [RELEASE_NAME] pravega/zookeeper --version=[NEW_VERSION] --set image.tag=[NEW_VERSION] --reuse-values --timeout 600s 37 | ``` 38 | **Note:** By specifying the `--reuse-values` option, the configuration of all parameters are retained across upgrades. However if some values need to be modified during the upgrade, the `--set` flag can be used to specify the new configuration for these parameters. Also, by skipping the `reuse-values` flag, the values of all parameters are reset to the default configuration that has been specified in the published charts for version [NEW_VERSION]. 39 | 40 | ## Uninstalling the Chart 41 | 42 | To uninstall/delete the zookeeper chart, use the following command: 43 | 44 | ``` 45 | $ helm uninstall [RELEASE_NAME] 46 | ``` 47 | 48 | This command removes all the Kubernetes components associated with the chart and deletes the release. 49 | 50 | ## Configuration 51 | 52 | The following table lists the configurable parameters of the zookeeper chart and their default values. 53 | 54 | | Parameter | Description | Default | 55 | | ----- | ----------- | ------ | 56 | | `replicas` | Expected size of the zookeeper cluster (valid range is from 1 to 7) | `3` | 57 | | `maxUnavailableReplicas` | Max unavailable replicas in pdb | `1` | 58 | | `triggerRollingRestart` | If true, the zookeeper cluster is restarted. 
After the restart is triggered, this value is auto-reverted to false. | `false` | 59 | | `image.repository` | Image repository | `pravega/zookeeper` | 60 | | `image.tag` | Image tag | `0.2.15` | 61 | | `image.pullPolicy` | Image pull policy | `IfNotPresent` | 62 | | `domainName` | External host name appended for dns annotation | | 63 | | `kubernetesClusterDomain` | Domain of the kubernetes cluster | `cluster.local` | 64 | | `probes.readiness.initialDelaySeconds` | Number of seconds after the container has started before readiness probe is initiated | `10` | 65 | | `probes.readiness.periodSeconds` | Number of seconds in which readiness probe will be performed | `10` | 66 | | `probes.readiness.failureThreshold` | Number of consecutive failures after which the readiness probe is considered failed | `3` | 67 | | `probes.readiness.successThreshold` | Minimum number of consecutive successes for the readiness probe to be considered successful after having failed | `1` | 68 | | `probes.readiness.timeoutSeconds` | Number of seconds after which the readiness probe times out | `10` | 69 | | `probes.liveness.initialDelaySeconds` | Number of seconds after the container has started before liveness probe is initiated | `10` | 70 | | `probes.liveness.periodSeconds` | Number of seconds in which liveness probe will be performed | `10` | 71 | | `probes.liveness.failureThreshold` | Number of consecutive failures after which the liveness probe is considered failed | `3` | 72 | | `probes.liveness.timeoutSeconds` | Number of seconds after which the liveness probe times out | `10` | 73 | | `labels` | Specifies the labels to be attached | `{}` | 74 | | `ports` | Groups the ports for a zookeeper cluster node for easy access | `[]` | 75 | | `pod` | Defines the policy to create new pods for the zookeeper cluster | `{}` | 76 | | `pod.labels` | Labels to attach to the pods | `{}` | 77 | | `pod.nodeSelector` | Map of key-value pairs to be present as labels in
the node in which the pod should run | `{}` | 78 | | `pod.affinity` | Specifies scheduling constraints on pods | `{}` | 79 | | `pod.resources` | Specifies resource requirements for the container | `{}` | 80 | | `pod.tolerations` | Specifies the pod's tolerations | `[]` | 81 | | `pod.env` | List of environment variables to set in the container | `[]` | 82 | | `pod.annotations` | Specifies the annotations to attach to pods | `{}` | 83 | | `pod.securityContext` | Specifies the security context for the entire pod | `{}` | 84 | | `pod.terminationGracePeriodSeconds` | Amount of time given to the pod to shutdown normally | `30` | 85 | | `pod.serviceAccountName` | Name for the service account | `zookeeper` | 86 | | `pod.imagePullSecrets` | ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images. | `[]` | 87 | | `clientService` | Defines the policy to create client Service for the zookeeper cluster. | {} | 88 | | `clientService.annotations` | Specifies the annotations to attach to client Service the operator creates. | {} | 89 | | `headlessService` | Defines the policy to create headless Service for the zookeeper cluster. | {} | 90 | | `headlessService.annotations` | Specifies the annotations to attach to headless Service the operator creates. | {} | 91 | | `adminServerService` | Defines the policy to create AdminServer Service for the zookeeper cluster. | {} | 92 | | `adminServerService.annotations` | Specifies the annotations to attach to AdminServer Service the operator creates. | {} | 93 | | `adminServerService.external` | Specifies if LoadBalancer should be created for the AdminServer. True means LoadBalancer will be created, false - only ClusterIP will be used. 
| false | 94 | | `config.initLimit` | Amount of time (in ticks) to allow followers to connect and sync to a leader | `10` | 95 | | `config.tickTime` | Length of a single tick which is the basic time unit used by Zookeeper (measured in milliseconds) | `2000` | 96 | | `config.syncLimit` | Amount of time (in ticks) to allow followers to sync with Zookeeper | `2` | 97 | | `config.globalOutstandingLimit` | Max limit for outstanding requests | `1000` | 98 | | `config.preAllocSize` | PreAllocSize in kilobytes | `65536` | 99 | | `config.snapCount` | The number of transactions recorded in the transaction log before a snapshot can be taken | `100000` | 100 | | `config.commitLogCount` | The number of committed requests in memory | `500` 101 | | `config.snapSizeLimitInKb` | SnapSizeLimitInKb | `4194304` | 102 | | `config.maxCnxns` | The total number of concurrent connections that can be made to a zookeeper server | `0` | 103 | | `config.maxClientCnxns` | The number of concurrent connections that a single client can make | `60` | 104 | | `config.minSessionTimeout` | The minimum session timeout in milliseconds that the server will allow the client to negotiate | `4000` | 105 | | `config.maxSessionTimeout` | The maximum session timeout in milliseconds that the server will allow the client to negotiate | `40000` | 106 | | `config.autoPurgeSnapRetainCount` | The number of snapshots to be retained | `3` 107 | | `config.autoPurgePurgeInterval` | The time interval in hours for which the purge task has to be triggered | `1` 108 | | `config.quorumListenOnAllIPs` | Whether Zookeeper server will listen for connections from its peers on all available IP addresses | `false` | 109 | | `config.additionalConfig` | Additional zookeeper configuration parameters that should be defined in generated zoo.cfg file | `{}` | 110 | | `storageType` | Type of storage to use; it can take either ephemeral or persistence as value | `persistence` | 111 | | `persistence.reclaimPolicy` | Reclaim policy for
persistent volumes | `Delete` | 112 | | `persistence.annotations` | Specifies the annotations to attach to pvcs | `{}` | 113 | | `persistence.storageClassName` | Storage class for persistent volumes | `` | 114 | | `persistence.volumeSize` | Size of the volume requested for persistent volumes | `20Gi` | 115 | | `ephemeral.emptydirvolumesource.medium` | What type of storage medium should back the directory. | `""` | 116 | | `ephemeral.emptydirvolumesource.sizeLimit` | Total amount of local storage required for the EmptyDir volume. | `20Gi` | 117 | | `containers` | Application containers run with the zookeeper pod | `[]` | 118 | | `initContainers` | Init Containers to add to the zookeeper pods | `[]` | 119 | | `volumes` | Named volumes that may be accessed by any container in the pod | `[]` | 120 | | `volumeMounts` | Customized volumeMounts for zookeeper container that can be configured to mount volumes to zookeeper container | `[]` | 121 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "zookeeper.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name.
13 | */}} 14 | {{- define "zookeeper.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Common labels 29 | */}} 30 | {{- define "zookeeper.commonLabels" -}} 31 | app.kubernetes.io/name: {{ include "zookeeper.name" . }} 32 | {{- if .Chart.AppVersion }} 33 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 34 | {{- end }} 35 | app.kubernetes.io/managed-by: {{ .Release.Service }} 36 | helm.sh/chart: "{{ .Chart.Name }}-{{ .Chart.Version | replace "+" "_" }}" 37 | {{- end -}} 38 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/post-install-upgrade-hooks.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 5 | namespace: {{ .Release.Namespace }} 6 | annotations: 7 | "helm.sh/hook": post-install, post-upgrade 8 | "helm.sh/hook-weight": "1" 9 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 10 | rules: 11 | - apiGroups: 12 | - zookeeper.pravega.io 13 | resources: 14 | - "*" 15 | verbs: 16 | - get 17 | --- 18 | 19 | kind: RoleBinding 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | metadata: 22 | name: {{ template "zookeeper.fullname" . 
}}-post-install-upgrade 23 | namespace: {{ .Release.Namespace }} 24 | annotations: 25 | "helm.sh/hook": post-install, post-upgrade 26 | "helm.sh/hook-weight": "1" 27 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 28 | subjects: 29 | - kind: ServiceAccount 30 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 31 | namespace: {{.Release.Namespace}} 32 | roleRef: 33 | kind: Role 34 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 35 | apiGroup: rbac.authorization.k8s.io 36 | 37 | --- 38 | 39 | apiVersion: v1 40 | kind: ServiceAccount 41 | metadata: 42 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 43 | namespace: {{ .Release.Namespace }} 44 | annotations: 45 | "helm.sh/hook": post-install, post-upgrade 46 | "helm.sh/hook-weight": "1" 47 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 48 | 49 | --- 50 | 51 | apiVersion: v1 52 | kind: ConfigMap 53 | metadata: 54 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 55 | namespace: {{ .Release.Namespace }} 56 | annotations: 57 | "helm.sh/hook": post-install, post-upgrade 58 | "helm.sh/hook-weight": "1" 59 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 60 | data: 61 | validations.sh: | 62 | #!/bin/sh 63 | set +e 64 | sleep 30 65 | echo "Checking for ready ZK replicas" 66 | kubectl get ZookeeperCluster -n {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} -o jsonpath='{.status.conditions[?(@.type=="PodsReady")].status}' | grep True 67 | if [ $? != 0 ]; then 68 | echo "ZK replicas not ready" 69 | exit 1 70 | fi 71 | set -e 72 | replicasZk=`kubectl get ZookeeperCluster -n {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} -o jsonpath='{.spec.replicas}'` 73 | readyReplicasZk=`kubectl get ZookeeperCluster -n {{ .Release.Namespace }} {{ template "zookeeper.fullname" . 
}} -o jsonpath='{.status.readyReplicas}'` 74 | currentVersionZk=`kubectl get ZookeeperCluster -n {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} -o jsonpath='{.status.currentVersion}'` 75 | targetVersionZk=`kubectl get ZookeeperCluster -n {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} -o jsonpath='{.spec.image.tag}'` 76 | 77 | echo 78 | echo "ZookeeperCluster replicas: $replicasZk" 79 | echo "ZookeeperCluster readyReplicas: $readyReplicasZk" 80 | echo "ZookeeperCluster currentVersion: $currentVersionZk" 81 | echo "ZookeeperCluster targetVersion: $targetVersionZk" 82 | echo 83 | if [[ -z $readyReplicasZk ]]; then 84 | echo "No ready ZK replicas" 85 | exit 2 86 | fi 87 | if [[ $replicasZk != $readyReplicasZk ]]; then 88 | echo "Wrong number of ZK replicas" 89 | exit 3 90 | fi 91 | if [[ -z $currentVersionZk ]]; then 92 | echo "Current ZK version not set" 93 | exit 4 94 | fi 95 | if [[ $currentVersionZk != $targetVersionZk ]]; then 96 | echo "Wrong current ZK version" 97 | exit 5 98 | fi 99 | 100 | echo "ZookeeperCluster is ready" 101 | --- 102 | 103 | apiVersion: batch/v1 104 | kind: Job 105 | metadata: 106 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 107 | namespace: {{ .Release.Namespace }} 108 | annotations: 109 | "helm.sh/hook": post-install, post-upgrade 110 | "helm.sh/hook-weight": "2" 111 | "helm.sh/hook-delete-policy": hook-succeeded, before-hook-creation, hook-failed 112 | spec: 113 | backoffLimit: {{ .Values.hooks.backoffLimit }} 114 | template: 115 | metadata: 116 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 117 | {{- if .Values.hooks.pod.annotations }} 118 | annotations: 119 | {{ toYaml .Values.hooks.pod.annotations | indent 8 }} 120 | {{- end }} 121 | spec: 122 | serviceAccountName: {{ template "zookeeper.fullname" . 
}}-post-install-upgrade 123 | restartPolicy: Never 124 | containers: 125 | - name: post-install-upgrade-job 126 | image: "{{ .Values.hooks.image.repository }}:{{ .Values.hooks.image.tag }}" 127 | {{- if .Values.hooks.securityContext }} 128 | securityContext: 129 | {{ toYaml .Values.hooks.securityContext | indent 10 }} 130 | {{- end }} 131 | command: 132 | - /scripts/validations.sh 133 | volumeMounts: 134 | - name: sh 135 | mountPath: /scripts 136 | readOnly: true 137 | volumes: 138 | - name: sh 139 | configMap: 140 | name: {{ template "zookeeper.fullname" . }}-post-install-upgrade 141 | defaultMode: 0555 142 | -------------------------------------------------------------------------------- /charts/zookeeper/templates/zookeeper.yaml: -------------------------------------------------------------------------------- 1 | {{- $storageType := .Values.storageType | default "persistence" -}} 2 | apiVersion: "zookeeper.pravega.io/v1beta1" 3 | kind: "ZookeeperCluster" 4 | metadata: 5 | name: {{ template "zookeeper.fullname" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{ include "zookeeper.commonLabels" . 
| indent 4 }} 9 | spec: 10 | replicas: {{ .Values.replicas }} 11 | {{- if .Values.maxUnavailableReplicas }} 12 | maxUnavailableReplicas: {{ .Values.maxUnavailableReplicas }} 13 | {{- end }} 14 | image: 15 | repository: {{ .Values.image.repository }} 16 | tag: {{ .Values.image.tag }} 17 | pullPolicy: {{ .Values.image.pullPolicy }} 18 | {{- if .Values.domainName }} 19 | domainName: {{ .Values.domainName }} 20 | {{- end }} 21 | kubernetesClusterDomain: {{ default "cluster.local" .Values.kubernetesClusterDomain }} 22 | {{- if .Values.probes }} 23 | probes: 24 | {{- if .Values.probes.readiness }} 25 | readinessProbe: 26 | initialDelaySeconds: {{ .Values.probes.readiness.initialDelaySeconds | default 10 }} 27 | periodSeconds: {{ .Values.probes.readiness.periodSeconds | default 10 }} 28 | failureThreshold: {{ .Values.probes.readiness.failureThreshold | default 3}} 29 | successThreshold: {{ .Values.probes.readiness.successThreshold | default 1 }} 30 | timeoutSeconds: {{ .Values.probes.readiness.timeoutSeconds | default 10}} 31 | {{- end }} 32 | {{- if .Values.probes.liveness }} 33 | livenessProbe: 34 | initialDelaySeconds: {{ .Values.probes.liveness.initialDelaySeconds | default 10 }} 35 | periodSeconds: {{ .Values.probes.liveness.periodSeconds | default 10 }} 36 | failureThreshold: {{ .Values.probes.liveness.failureThreshold | default 3 }} 37 | timeoutSeconds: {{ .Values.probes.liveness.timeoutSeconds | default 10 }} 38 | {{- end }} 39 | {{- end }} 40 | {{- if .Values.containers }} 41 | containers: 42 | {{ toYaml .Values.containers | indent 4 }} 43 | {{- end }} 44 | {{- if .Values.volumes }} 45 | volumes: 46 | {{ toYaml .Values.volumes | indent 4 }} 47 | {{- end }} 48 | {{- if .Values.volumeMounts }} 49 | volumeMounts: 50 | {{ toYaml .Values.volumeMounts | indent 4 }} 51 | {{- end }} 52 | {{- if .Values.initContainers }} 53 | initContainers: 54 | {{ toYaml .Values.initContainers| indent 4 }} 55 | {{- end }} 56 | {{- if .Values.labels }} 57 | labels: 58 | {{ toYaml 
.Values.labels | indent 4 }} 59 | {{- end }} 60 | {{- if .Values.ports }} 61 | ports: 62 | {{ toYaml .Values.ports | indent 4 }} 63 | {{- end }} 64 | {{- if .Values.triggerRollingRestart }} 65 | triggerRollingRestart: {{ .Values.triggerRollingRestart }} 66 | {{- end }} 67 | pod: 68 | {{- if .Values.pod.labels }} 69 | labels: 70 | {{ toYaml .Values.pod.labels | indent 6 }} 71 | {{- end }} 72 | {{- if .Values.pod.nodeSelector }} 73 | nodeSelector: 74 | {{ toYaml .Values.pod.nodeSelector | indent 6 }} 75 | {{- end }} 76 | {{- if .Values.pod.affinity }} 77 | affinity: 78 | {{ toYaml .Values.pod.affinity | indent 6 }} 79 | {{- end }} 80 | {{- if .Values.pod.topologySpreadConstraints }} 81 | topologySpreadConstraints: 82 | {{ toYaml .Values.pod.topologySpreadConstraints | indent 6 }} 83 | {{- end }} 84 | {{- if .Values.pod.resources }} 85 | resources: 86 | {{ toYaml .Values.pod.resources | indent 6 }} 87 | {{- end }} 88 | {{- if .Values.pod.tolerations }} 89 | tolerations: 90 | {{ toYaml .Values.pod.tolerations | indent 6 }} 91 | {{- end }} 92 | {{- if .Values.pod.env }} 93 | env: 94 | {{ toYaml .Values.pod.env | indent 6 }} 95 | {{- end }} 96 | {{- if .Values.pod.annotations }} 97 | annotations: 98 | {{ toYaml .Values.pod.annotations | indent 6 }} 99 | {{- end }} 100 | {{- if .Values.pod.securityContext }} 101 | securityContext: 102 | {{ toYaml .Values.pod.securityContext | indent 6 }} 103 | {{- end }} 104 | {{- if .Values.pod.terminationGracePeriodSeconds }} 105 | terminationGracePeriodSeconds: {{ .Values.pod.terminationGracePeriodSeconds }} 106 | {{- end }} 107 | serviceAccountName: {{ default "zookeeper" .Values.pod.serviceAccountName }} 108 | {{- if .Values.pod.imagePullSecrets }} 109 | imagePullSecrets: 110 | {{ toYaml .Values.pod.imagePullSecrets | indent 6 }} 111 | {{- end }} 112 | {{- if .Values.clientService }} 113 | clientService: 114 | {{- if .Values.clientService.annotations }} 115 | annotations: 116 | {{ toYaml .Values.clientService.annotations | indent 6 
}} 117 | {{- end }} 118 | {{- end }} 119 | {{- if .Values.headlessService }} 120 | headlessService: 121 | {{- if .Values.headlessService.annotations }} 122 | annotations: 123 | {{ toYaml .Values.headlessService.annotations | indent 6 }} 124 | {{- end }} 125 | {{- end }} 126 | {{- if .Values.adminServerService }} 127 | adminServerService: 128 | {{- if .Values.adminServerService.annotations }} 129 | annotations: 130 | {{ toYaml .Values.adminServerService.annotations | indent 6 }} 131 | {{- end }} 132 | {{- if .Values.adminServerService.external }} 133 | external: {{ .Values.adminServerService.external }} 134 | {{- end }} 135 | {{- end }} 136 | {{- if .Values.config }} 137 | config: 138 | {{- toYaml .Values.config | nindent 4 }} 139 | {{- end }} 140 | storageType: {{ $storageType }} 141 | {{- if eq $storageType "ephemeral" }} 142 | ephemeral: 143 | {{- if .Values.ephemeral.emptydirvolumesource }} 144 | emptydirvolumesource: 145 | {{- if .Values.ephemeral.emptydirvolumesource.medium }} 146 | medium: {{ .Values.ephemeral.emptydirvolumesource.medium }} 147 | {{- end }} 148 | {{- if .Values.ephemeral.emptydirvolumesource.sizeLimit }} 149 | sizeLimit: {{ .Values.ephemeral.emptydirvolumesource.sizeLimit }} 150 | {{- end }} 151 | {{- end }} 152 | {{- else }} 153 | persistence: 154 | reclaimPolicy: {{ .Values.persistence.reclaimPolicy }} 155 | {{- if .Values.persistence.annotations }} 156 | annotations: 157 | {{ toYaml .Values.persistence.annotations | indent 6 }} 158 | {{- end }} 159 | {{- if or .Values.persistence.storageClassName .Values.persistence.volumeSize }} 160 | spec: 161 | {{- if .Values.persistence.storageClassName }} 162 | storageClassName: {{ .Values.persistence.storageClassName }} 163 | {{- end }} 164 | {{- if .Values.persistence.volumeSize }} 165 | resources: 166 | requests: 167 | storage: {{ .Values.persistence.volumeSize }} 168 | {{- end }} 169 | {{- end }} 170 | {{- end }} 171 | 
-------------------------------------------------------------------------------- /charts/zookeeper/values.yaml: -------------------------------------------------------------------------------- 1 | replicas: 3 2 | maxUnavailableReplicas: 3 | 4 | image: 5 | repository: pravega/zookeeper 6 | tag: 0.2.15 7 | pullPolicy: IfNotPresent 8 | 9 | triggerRollingRestart: false 10 | 11 | domainName: 12 | labels: {} 13 | ports: [] 14 | # - containerPort: 2181 15 | # name: client 16 | # - containerPort: 2888 17 | # name: quorum 18 | # - containerPort: 8080 19 | # name: admin-server 20 | # - containerPort: 7000 21 | # name: metrics 22 | kubernetesClusterDomain: "cluster.local" 23 | probes: 24 | readiness: 25 | initialDelaySeconds: 10 26 | periodSeconds: 10 27 | failureThreshold: 3 28 | successThreshold: 1 29 | timeoutSeconds: 10 30 | liveness: 31 | initialDelaySeconds: 10 32 | periodSeconds: 10 33 | failureThreshold: 3 34 | timeoutSeconds: 10 35 | pod: 36 | # labels: {} 37 | # nodeSelector: {} 38 | # affinity: {} 39 | # topologySpreadConstraints: {} 40 | # resources: {} 41 | # tolerations: [] 42 | # env: [] 43 | # annotations: {} 44 | # securityContext: {} 45 | # terminationGracePeriodSeconds: 30 46 | serviceAccountName: zookeeper 47 | # imagePullSecrets: [] 48 | 49 | adminServerService: {} 50 | # annotations: {} 51 | # external: false 52 | 53 | clientService: {} 54 | # annotations: {} 55 | 56 | headlessService: {} 57 | # annotations: {} 58 | 59 | config: 60 | # initLimit: 10 61 | # tickTime: 2000 62 | # syncLimit: 2 63 | # globalOutstandingLimit: 1000 64 | preAllocSize: 16384 # default 65536 65 | # snapCount: 100000 66 | # commitLogCount: 500 67 | # snapSizeLimitInKb: 4194304 68 | # maxCnxns: 0 69 | # maxClientCnxns: 60 70 | # minSessionTimeout: 4000 71 | # maxSessionTimeout: 40000 72 | # autoPurgeSnapRetainCount: 3 73 | # autoPurgePurgeInterval: 1 74 | # quorumListenOnAllIPs: false 75 | # additionalConfig: {} 76 | 77 | ## configure the storage type 78 | ## accepted values : 
persistence/ephemeral 79 | ## default option is persistence 80 | storageType: persistence 81 | 82 | persistence: 83 | storageClassName: 84 | ## specifying reclaim policy for PersistentVolumes 85 | ## accepted values - Delete / Retain 86 | reclaimPolicy: Delete 87 | annotations: {} 88 | volumeSize: 20Gi 89 | 90 | ephemeral: 91 | emptydirvolumesource: 92 | ## specifying Medium for emptydirvolumesource 93 | ## accepted values - ""/Memory 94 | medium: "" 95 | sizeLimit: 20Gi 96 | 97 | hooks: 98 | image: 99 | repository: lachlanevenson/k8s-kubectl 100 | tag: v1.23.2 101 | securityContext: {} 102 | # runAsUser: 1001 103 | # runAsGroup: 1001 104 | backoffLimit: 10 105 | pod: 106 | annotations: {} 107 | 108 | containers: [] 109 | volumes: [] 110 | volumeMounts: [] 111 | initContainers: [] 112 | -------------------------------------------------------------------------------- /charts/zookeeper/values/minikube.yaml: -------------------------------------------------------------------------------- 1 | replicas: 1 2 | 3 | persistence: 4 | storageClassName: standard 5 | ## specifying reclaim policy for PersistentVolumes 6 | ## accepted values - Delete / Retain 7 | reclaimPolicy: Delete 8 | volumeSize: 10Gi 9 | -------------------------------------------------------------------------------- /cmd/exporter/main.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package main 12 | 13 | import ( 14 | "flag" 15 | "fmt" 16 | "math/rand" 17 | "os" 18 | "runtime" 19 | "strconv" 20 | "time" 21 | 22 | "k8s.io/apimachinery/pkg/types" 23 | 24 | zookeepercluster "github.com/pravega/zookeeper-operator/controllers" 25 | "github.com/pravega/zookeeper-operator/pkg/version" 26 | "github.com/pravega/zookeeper-operator/pkg/yamlexporter" 27 | _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" 28 | _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" 29 | logf "sigs.k8s.io/controller-runtime/pkg/log" 30 | ) 31 | 32 | var ( 33 | log = logf.Log.WithName("cmd") 34 | versionFlag bool 35 | ) 36 | 37 | func init() { 38 | flag.BoolVar(&versionFlag, "version", false, "Show version and quit") 39 | } 40 | 41 | func printVersion() { 42 | log.Info(fmt.Sprintf("zookeeper-operator Version: %v", version.Version)) 43 | log.Info(fmt.Sprintf("Git SHA: %s", version.GitSHA)) 44 | log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) 45 | log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) 46 | } 47 | 48 | func main() { 49 | flags := flag.NewFlagSet("myFlagSet", flag.ExitOnError) 50 | ifilePtr := flags.String("i", "./ZookeeperCluster.yaml", "Input YAML file") 51 | odirPtr := flags.String("o", ".", "YAML output directory") 52 | 53 | _ = flags.Parse(os.Args[1:]) 54 | 55 | log.Info(fmt.Sprintf("Input YAML file -i:%s", *ifilePtr)) 56 | log.Info(fmt.Sprintf("Output YAML Directory -o:%s", *odirPtr)) 57 | 58 | // Read input YAML file -- This is the ZookeeperCluster Resource YAML file 59 | log.Info(fmt.Sprintf("Reading YAML file from the file:%s", *ifilePtr)) 60 | z, err := yamlexporter.ReadInputClusterYAMLFile(*ifilePtr) 61 | if err != nil { 62 | log.Error(err, "read input YAML file failed") 63 | os.Exit(1) 64 | } 65 | 66 | // create base output directory and sub-directories named based on the deployment phase 67 | 
log.Info(fmt.Sprintf("create base output dir:%s and phase based subdirs", *odirPtr)) 68 | err = yamlexporter.CreateYAMLOutputDir(*odirPtr) 69 | if err != nil { 70 | log.Error(err, "create output dir failed") 71 | os.Exit(1) 72 | } 73 | 74 | // we need to provide our own UID for ECSCluster Resource, since the rest of the resources will reference UID of ECSCluster and it must be there. 75 | rand.Seed(time.Now().UnixNano()) 76 | uid := strconv.FormatUint(rand.Uint64(), 10) 77 | log.Info(fmt.Sprintf("UID of the ECSCluster Resource:%s\n", uid)) 78 | z.UID = types.UID(uid) 79 | 80 | yamlexporter.YAMLOutputDir = *odirPtr 81 | 82 | reconcilerZookeeper := zookeepercluster.YAMLExporterReconciler(z) 83 | 84 | // Generate YAML files 85 | err = reconcilerZookeeper.GenerateYAML(z) 86 | if err != nil { 87 | log.Error(err, "YAML file generation failed") 88 | os.Exit(1) 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/zookeeper.pravega.io_zookeeperclusters.yaml 6 | # +kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patchesStrategicMerge: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- patches/webhook_in_zookeeperclusters.yaml 12 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 13 | 14 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
15 | # patches here are for enabling the CA injection for each CRD 16 | #- patches/cainjection_in_zookeeperclusters.yaml 17 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 18 | 19 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: default 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | 9 | # Labels to add to all resources and selectors. 10 | #commonLabels: 11 | # someName: someValue 12 | 13 | bases: 14 | - ../crd 15 | - ../rbac 16 | - ../manager 17 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 18 | # crd/kustomization.yaml 19 | #- ../webhook 20 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 21 | #- ../certmanager 22 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 23 | #- ../prometheus 24 | 25 | patchesStrategicMerge: 26 | # Protect the /metrics endpoint by putting it behind auth. 27 | # If you want your controller-manager to expose the /metrics 28 | # # endpoint w/o any authn/z, please comment the following line. 29 | #- manager_auth_proxy_patch.yaml 30 | 31 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 32 | # crd/kustomization.yaml 33 | #- manager_webhook_patch.yaml 34 | 35 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 36 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
37 | # 'CERTMANAGER' needs to be enabled to use ca injection 38 | #- webhookcainjection_patch.yaml 39 | 40 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | images: 6 | - name: pravega/zookeeper-operator 7 | newName: testzkop/zookeeper-operator-testimages 8 | newTag: check 9 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: zookeeper-operator 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | name: zookeeper-operator 10 | template: 11 | metadata: 12 | labels: 13 | name: zookeeper-operator 14 | spec: 15 | serviceAccountName: zookeeper-operator 16 | containers: 17 | - name: zookeeper-operator 18 | # Replace this with the built image name 19 | image: pravega/zookeeper-operator:0.2.15 20 | ports: 21 | - containerPort: 60000 22 | name: metrics 23 | command: 24 | - zookeeper-operator 25 | imagePullPolicy: Always 26 | env: 27 | - name: WATCH_NAMESPACE 28 | valueFrom: 29 | fieldRef: 30 | fieldPath: metadata.namespace 31 | - name: POD_NAME 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.name 35 | - name: OPERATOR_NAME 36 | value: "zookeeper-operator" 37 | -------------------------------------------------------------------------------- /config/rbac/all_ns_rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: zookeeper-operator 5 | 6 | --- 7 | 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: zookeeper-operator 12 | rules: 13 | - apiGroups: 14 | - 
zookeeper.pravega.io 15 | resources: 16 | - "*" 17 | verbs: 18 | - "*" 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - nodes 23 | - pods 24 | - services 25 | - endpoints 26 | - persistentvolumeclaims 27 | - events 28 | - configmaps 29 | - secrets 30 | - serviceaccounts 31 | verbs: 32 | - "*" 33 | - apiGroups: 34 | - apps 35 | resources: 36 | - deployments 37 | - daemonsets 38 | - replicasets 39 | - statefulsets 40 | verbs: 41 | - "*" 42 | - apiGroups: 43 | - policy 44 | resources: 45 | - poddisruptionbudgets 46 | verbs: 47 | - "*" 48 | 49 | --- 50 | 51 | kind: ClusterRoleBinding 52 | apiVersion: rbac.authorization.k8s.io/v1 53 | metadata: 54 | name: zookeeper-operator-cluster-role-binding 55 | subjects: 56 | - kind: ServiceAccount 57 | name: zookeeper-operator 58 | namespace: default 59 | roleRef: 60 | kind: ClusterRole 61 | name: zookeeper-operator 62 | apiGroup: rbac.authorization.k8s.io 63 | -------------------------------------------------------------------------------- /config/rbac/default_ns_rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: zookeeper-operator 5 | 6 | --- 7 | 8 | kind: Role 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | name: zookeeper-operator 12 | rules: 13 | - apiGroups: 14 | - zookeeper.pravega.io 15 | resources: 16 | - "*" 17 | verbs: 18 | - "*" 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - pods 23 | - services 24 | - endpoints 25 | - persistentvolumeclaims 26 | - events 27 | - configmaps 28 | - secrets 29 | - serviceaccounts 30 | verbs: 31 | - "*" 32 | - apiGroups: 33 | - apps 34 | resources: 35 | - deployments 36 | - daemonsets 37 | - replicasets 38 | - statefulsets 39 | verbs: 40 | - "*" 41 | - apiGroups: 42 | - policy 43 | resources: 44 | - poddisruptionbudgets 45 | verbs: 46 | - "*" 47 | --- 48 | kind: RoleBinding 49 | apiVersion: rbac.authorization.k8s.io/v1 50 | metadata: 51 | name: 
zookeeper-operator 52 | subjects: 53 | - kind: ServiceAccount 54 | name: zookeeper-operator 55 | namespace: default 56 | roleRef: 57 | kind: Role 58 | name: zookeeper-operator 59 | apiGroup: rbac.authorization.k8s.io 60 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - default_ns_rbac.yaml 3 | - role.yaml 4 | - role_binding.yaml 5 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | creationTimestamp: null 6 | name: manager-role 7 | rules: 8 | - apiGroups: 9 | - zookeeper.pravega.io.zookeeper.pravega.io 10 | resources: 11 | - zookeeperclusters 12 | verbs: 13 | - create 14 | - delete 15 | - get 16 | - list 17 | - patch 18 | - update 19 | - watch 20 | - apiGroups: 21 | - zookeeper.pravega.io.zookeeper.pravega.io 22 | resources: 23 | - zookeeperclusters/status 24 | verbs: 25 | - get 26 | - patch 27 | - update 28 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: 
zookeeper-operator 5 | -------------------------------------------------------------------------------- /config/samples/ECS/zookeeper_v1beta1_zookeepercluster_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: zookeeper.pravega.io/v1beta1 2 | kind: ZookeeperCluster 3 | metadata: 4 | name: zookeeper 5 | spec: 6 | replicas: 3 7 | image: 8 | repository: pravega/zookeeper 9 | tag: 0.2.15 10 | storageType: persistence 11 | persistence: 12 | reclaimPolicy: Retain 13 | spec: 14 | storageClassName: "standard" 15 | resources: 16 | requests: 17 | storage: 20Gi 18 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## This file is auto-generated, do not modify ## 2 | resources: 3 | - pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml 4 | -------------------------------------------------------------------------------- /config/samples/pravega/zookeeper_v1beta1_zookeepercluster_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: zookeeper.pravega.io/v1beta1 2 | kind: ZookeeperCluster 3 | metadata: 4 | name: zookeeper 5 | spec: 6 | replicas: 3 7 | image: 8 | repository: pravega/zookeeper 9 | tag: 0.2.15 10 | storageType: persistence 11 | persistence: 12 | reclaimPolicy: Delete 13 | spec: 14 | storageClassName: "standard" 15 | resources: 16 | requests: 17 | storage: 20Gi 18 | -------------------------------------------------------------------------------- /config/test/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: default 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 
7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | 9 | # Labels to add to all resources and selectors. 10 | #commonLabels: 11 | # someName: someValue 12 | 13 | bases: 14 | - ../crd 15 | - ../rbac 16 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 17 | # crd/kustomization.yaml 18 | #- ../webhook 19 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 20 | #- ../certmanager 21 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 22 | #- ../prometheus 23 | 24 | patchesStrategicMerge: 25 | # Protect the /metrics endpoint by putting it behind auth. 26 | # If you want your controller-manager to expose the /metrics 27 | # # endpoint w/o any authn/z, please comment the following line. 28 | #- manager_auth_proxy_patch.yaml 29 | 30 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 31 | # crd/kustomization.yaml 32 | #- manager_webhook_patch.yaml 33 | 34 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 35 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
36 | # 'CERTMANAGER' needs to be enabled to use ca injection 37 | #- webhookcainjection_patch.yaml 38 | 39 | -------------------------------------------------------------------------------- /doc/operator-upgrade.md: -------------------------------------------------------------------------------- 1 | # Upgrading Operator 2 | zookeeperoperator can be upgraded to a version **[VERSION]** via helm using the following command 3 | 4 | ``` 5 | $ helm upgrade [ZOOKEEPER_OPERATOR_RELEASE_NAME] pravega/zookeeper-operator --version=[VERSION] 6 | ``` 7 | The zookeeper operator with deployment name **[DEPLOYMENT_NAME]** can also be upgraded manually by modifying the image tag using kubectl edit, patch or apply 8 | ``` 9 | $ kubectl edit deploy [DEPLOYMENT_NAME] 10 | ``` 11 | > Note: If you are upgrading zookeeper operator version to 0.2.9 or above manually, clusterrole has to be updated to include serviceaccounts. After updating clusterroles, zookeeper operator pod has to be restarted for the changes to take effect. 12 | 13 | ``` 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - pods 18 | - services 19 | - endpoints 20 | - persistentvolumeclaims 21 | - events 22 | - configmaps 23 | - secrets 24 | - serviceaccounts 25 | verbs: 26 | - "*" 27 | ``` 28 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | 11 | ARG DOCKER_REGISTRY 12 | FROM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}openjdk:11-jdk 13 | RUN mkdir /zu 14 | COPY zu /zu 15 | WORKDIR /zu 16 | RUN ./gradlew --console=verbose --info shadowJar 17 | 18 | FROM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}zookeeper:3.9.3 19 | COPY bin /usr/local/bin 20 | RUN chmod +x /usr/local/bin/* 21 | COPY --from=0 /zu/build/libs/zu.jar /opt/libs/ 22 | 23 | RUN apt-get -q update && \ 24 | apt-get install -y dnsutils curl procps socat 25 | -------------------------------------------------------------------------------- /docker/Dockerfile-swarm: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) Dell Inc., or its subsidiaries. All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | ARG DOCKER_REGISTRY 11 | FROM ${DOCKER_REGISTRY:+$DOCKER_REGISTRY/}zookeeper:3.6.1 12 | COPY zoo.cfg.swarm /conf/zoo.cfg 13 | -------------------------------------------------------------------------------- /docker/bin/zookeeperFunctions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # 4 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | 13 | set -ex 14 | 15 | function zkConfig() { 16 | echo "$HOST.$DOMAIN:$QUORUM_PORT:$LEADER_PORT:$ROLE;$CLIENT_PORT" 17 | } 18 | 19 | function zkConnectionString() { 20 | # If the client service address is not yet available, then return localhost 21 | set +e 22 | getent hosts "${CLIENT_HOST}" 2>/dev/null 1>/dev/null 23 | if [[ $? -ne 0 ]]; then 24 | set -e 25 | echo "localhost:${CLIENT_PORT}" 26 | else 27 | set -e 28 | echo "${CLIENT_HOST}:${CLIENT_PORT}" 29 | fi 30 | } -------------------------------------------------------------------------------- /docker/bin/zookeeperLive.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | 12 | set -ex 13 | 14 | source /conf/env.sh 15 | 16 | OK=$(echo ruok | socat stdio tcp:localhost:$CLIENT_PORT) 17 | 18 | # Check to see if zookeeper service answers 19 | if [[ "$OK" == "imok" ]]; then 20 | exit 0 21 | 22 | else 23 | exit 1 24 | 25 | fi 26 | -------------------------------------------------------------------------------- /docker/bin/zookeeperMetrics.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | 12 | set -ex 13 | 14 | source /conf/env.sh 15 | 16 | echo mntr | socat stdio tcp:localhost:$CLIENT_PORT >& 1 17 | -------------------------------------------------------------------------------- /docker/bin/zookeeperReady.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | 12 | set -ex 13 | 14 | source /conf/env.sh 15 | source /usr/local/bin/zookeeperFunctions.sh 16 | 17 | HOST=`hostname -s` 18 | DATA_DIR=/data 19 | MYID_FILE=$DATA_DIR/myid 20 | LOG4J_CONF=/conf/log4j-quiet.properties 21 | STATIC_CONFIG=/data/conf/zoo.cfg 22 | 23 | OK=$(echo ruok | socat stdio tcp:localhost:$CLIENT_PORT) 24 | 25 | # Check to see if zookeeper service answers 26 | if [[ "$OK" == "imok" ]]; then 27 | set +e 28 | getent hosts $DOMAIN 29 | if [[ $? -ne 0 ]]; then 30 | set -e 31 | echo "There is no active ensemble, skipping readiness probe..." 32 | exit 0 33 | else 34 | set -e 35 | # An ensemble exists, check to see if this node is already a member. 
36 | # Check to see if zookeeper service for this node is a participant 37 | set +e 38 | # Extract resource name and this members' ordinal value from pod hostname 39 | HOST=`hostname -s` 40 | if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then 41 | NAME=${BASH_REMATCH[1]} 42 | ORD=${BASH_REMATCH[2]} 43 | else 44 | echo Failed to parse name and ordinal of Pod 45 | exit 1 46 | fi 47 | MYID=$((ORD+1)) 48 | ONDISK_CONFIG=false 49 | if [ -f $MYID_FILE ]; then 50 | EXISTING_ID="`cat $DATA_DIR/myid`" 51 | if [[ "$EXISTING_ID" == "$MYID" && -f $STATIC_CONFIG ]]; then 52 | #If Id is correct and configuration is present under `/data/conf` 53 | ONDISK_CONFIG=true 54 | DYN_CFG_FILE_LINE=`cat $STATIC_CONFIG|grep "dynamicConfigFile\="` 55 | DYN_CFG_FILE=${DYN_CFG_FILE_LINE##dynamicConfigFile=} 56 | SERVER_FOUND=`cat $DYN_CFG_FILE | grep "server.${MYID}=" | wc -l` 57 | if [[ "$SERVER_FOUND" == "0" ]]; then 58 | echo "Server not found in ensemble. Exiting ..." 59 | exit 1 60 | fi 61 | SERVER=`cat $DYN_CFG_FILE | grep "server.${MYID}="` 62 | if [[ "$SERVER" == *"participant"* ]]; then 63 | ROLE=participant 64 | elif [[ "$SERVER" == *"observer"* ]]; then 65 | ROLE=observer 66 | fi 67 | fi 68 | fi 69 | 70 | if [[ "$ROLE" == "participant" ]]; then 71 | echo "Zookeeper service is available and an active participant" 72 | exit 0 73 | elif [[ "$ROLE" == "observer" ]]; then 74 | echo "Zookeeper service is ready to be upgraded from observer to participant." 75 | ROLE=participant 76 | ZKURL=$(zkConnectionString) 77 | ZKCONFIG=$(zkConfig) 78 | java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /opt/libs/zu.jar remove $ZKURL $MYID 79 | sleep 1 80 | java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /opt/libs/zu.jar add $ZKURL $MYID $ZKCONFIG 81 | exit 0 82 | else 83 | echo "Something has gone wrong. Unable to determine zookeeper role." 
84 | exit 1 85 | fi 86 | fi 87 | 88 | else 89 | echo "Zookeeper service is not available for requests" 90 | exit 1 91 | fi 92 | -------------------------------------------------------------------------------- /docker/bin/zookeeperStart.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | 12 | set -ex 13 | 14 | source /conf/env.sh 15 | source /usr/local/bin/zookeeperFunctions.sh 16 | 17 | HOST=`hostname -s` 18 | DATA_DIR=/data 19 | MYID_FILE=$DATA_DIR/myid 20 | LOG4J_CONF=/conf/log4j-quiet.properties 21 | DYNCONFIG=$DATA_DIR/zoo.cfg.dynamic 22 | STATIC_CONFIG=/data/conf/zoo.cfg 23 | 24 | # Extract resource name and this members ordinal value from pod hostname 25 | if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then 26 | NAME=${BASH_REMATCH[1]} 27 | ORD=${BASH_REMATCH[2]} 28 | else 29 | echo Failed to parse name and ordinal of Pod 30 | exit 1 31 | fi 32 | 33 | MYID=$((ORD+1)) 34 | 35 | # Values for first startup 36 | WRITE_CONFIGURATION=true 37 | REGISTER_NODE=true 38 | ONDISK_MYID_CONFIG=false 39 | ONDISK_DYN_CONFIG=false 40 | 41 | # Check validity of on-disk configuration 42 | if [ -f $MYID_FILE ]; then 43 | EXISTING_ID="`cat $DATA_DIR/myid`" 44 | if [[ "$EXISTING_ID" == "$MYID" && -f $STATIC_CONFIG ]]; then 45 | # If Id is correct and configuration is present under `/data/conf` 46 | ONDISK_MYID_CONFIG=true 47 | fi 48 | fi 49 | 50 | if [ -f $DYNCONFIG ]; then 51 | ONDISK_DYN_CONFIG=true 52 | fi 53 | 54 | set +e 55 | # Check if envoy is up and running 56 | if [[ -n "$ENVOY_SIDECAR_STATUS" ]]; then 57 | COUNT=0 58 | MAXCOUNT=${1:-30} 59 | HEALTHYSTATUSCODE="200" 60 | while true; do 61 | 
COUNT=$(expr $COUNT + 1) 62 | SC=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:15000/ready) 63 | echo "waiting for envoy proxy to come up"; 64 | sleep 1; 65 | if (( "$SC" == "$HEALTHYSTATUSCODE" || "$MAXCOUNT" == "$COUNT" )); then 66 | break 67 | fi 68 | done 69 | fi 70 | set -e 71 | 72 | # Determine if there is an ensemble available to join by checking the service domain 73 | set +e 74 | getent hosts $DOMAIN # This only performs a dns lookup 75 | if [[ $? -eq 0 ]]; then 76 | ACTIVE_ENSEMBLE=true 77 | elif nslookup $DOMAIN | grep -q "server can't find $DOMAIN"; then 78 | echo "there is no active ensemble" 79 | ACTIVE_ENSEMBLE=false 80 | else 81 | # If an nslookup of the headless service domain fails, then there is no 82 | # active ensemble yet, but in certain cases nslookup of headless service 83 | # takes a while to come up even if there is active ensemble 84 | ACTIVE_ENSEMBLE=false 85 | declare -i count=20 86 | while [[ $count -ge 0 ]] 87 | do 88 | sleep 2 89 | ((count=count-1)) 90 | getent hosts $DOMAIN 91 | if [[ $? -eq 0 ]]; then 92 | ACTIVE_ENSEMBLE=true 93 | break 94 | fi 95 | done 96 | fi 97 | 98 | if [[ "$ONDISK_MYID_CONFIG" == true && "$ONDISK_DYN_CONFIG" == true ]]; then 99 | # If Configuration is present, we assume, there is no need to write configuration. 100 | WRITE_CONFIGURATION=false 101 | else 102 | WRITE_CONFIGURATION=true 103 | fi 104 | 105 | if [[ "$ACTIVE_ENSEMBLE" == false ]]; then 106 | # This is the first node being added to the cluster or headless service not yet available 107 | REGISTER_NODE=false 108 | else 109 | # An ensemble exists, check to see if this node is already a member. 110 | if [[ "$ONDISK_MYID_CONFIG" == false || "$ONDISK_DYN_CONFIG" == false ]]; then 111 | REGISTER_NODE=true 112 | else 113 | REGISTER_NODE=false 114 | fi 115 | fi 116 | 117 | if [[ "$WRITE_CONFIGURATION" == true ]]; then 118 | echo "Writing myid: $MYID to: $MYID_FILE." 
119 | echo $MYID > $MYID_FILE 120 | if [[ $MYID -eq 1 ]]; then 121 | ROLE=participant 122 | echo Initial initialization of ordinal 0 pod, creating new config. 123 | ZKCONFIG=$(zkConfig) 124 | echo Writing bootstrap configuration with the following config: 125 | echo $ZKCONFIG 126 | echo $MYID > $MYID_FILE 127 | echo "server.${MYID}=${ZKCONFIG}" > $DYNCONFIG 128 | fi 129 | fi 130 | 131 | if [[ "$REGISTER_NODE" == true ]]; then 132 | ROLE=observer 133 | ZKURL=$(zkConnectionString) 134 | ZKCONFIG=$(zkConfig) 135 | set -e 136 | echo Registering node and writing local configuration to disk. 137 | java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /opt/libs/zu.jar add $ZKURL $MYID $ZKCONFIG $DYNCONFIG 138 | set +e 139 | fi 140 | 141 | ZOOCFGDIR=/data/conf 142 | export ZOOCFGDIR 143 | echo Copying /conf contents to writable directory, to support Zookeeper dynamic reconfiguration 144 | if [[ ! -d "$ZOOCFGDIR" ]]; then 145 | mkdir $ZOOCFGDIR 146 | cp -f /conf/zoo.cfg $ZOOCFGDIR 147 | else 148 | echo Copying the /conf/zoo.cfg contents except the dynamic config file during restart 149 | echo -e "$( head -n -1 /conf/zoo.cfg )""\n""$( tail -n 1 "$STATIC_CONFIG" )" > $STATIC_CONFIG 150 | fi 151 | cp -f /conf/log4j.properties $ZOOCFGDIR 152 | cp -f /conf/log4j-quiet.properties $ZOOCFGDIR 153 | cp -f /conf/env.sh $ZOOCFGDIR 154 | 155 | if [ -f $DYNCONFIG ]; then 156 | # Node registered, start server 157 | echo Starting zookeeper service 158 | zkServer.sh --config $ZOOCFGDIR start-foreground 159 | else 160 | echo "Node failed to register!" 161 | exit 1 162 | fi 163 | -------------------------------------------------------------------------------- /docker/bin/zookeeperTeardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | 12 | set -ex 13 | 14 | source /conf/env.sh 15 | source /usr/local/bin/zookeeperFunctions.sh 16 | 17 | DATA_DIR=/data 18 | MYID_FILE=$DATA_DIR/myid 19 | LOG4J_CONF=/conf/log4j-quiet.properties 20 | 21 | # Wait for client connections to drain. Kubernetes will wait until the configured 22 | # "terminationGracePeriodSeconds" before forcibly killing the container 23 | for (( i = 0; i < 6; i++ )); do 24 | CONN_COUNT=`echo cons | socat stdio tcp:localhost:$CLIENT_PORT | grep -v "^$" |grep -v "/127.0.0.1:" | wc -l` 25 | if [[ "$CONN_COUNT" -gt 0 ]]; then 26 | echo "$CONN_COUNT non-local connections still connected." 27 | sleep 5 28 | else 29 | echo "$CONN_COUNT non-local connections" 30 | break 31 | fi 32 | done 33 | 34 | # Check to see if zookeeper service for this node is a participant 35 | set +e 36 | ZKURL=$(zkConnectionString) 37 | set -e 38 | MYID=`cat $MYID_FILE` 39 | 40 | ZNODE_PATH="/zookeeper-operator/$CLUSTER_NAME" 41 | CLUSTERSIZE=`java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /opt/libs/zu.jar sync $ZKURL $ZNODE_PATH` 42 | echo "CLUSTER_SIZE=$CLUSTERSIZE, MyId=$MYID" 43 | if [[ -n "$CLUSTERSIZE" && "$CLUSTERSIZE" -lt "$MYID" ]]; then 44 | # If ClusterSize < MyId, this server is being permanently removed. 45 | java -Dlog4j.configuration=file:"$LOG4J_CONF" -jar /opt/libs/zu.jar remove $ZKURL $MYID 46 | echo $?
47 | fi 48 | 49 | # Kill the primary process ourselves to circumvent the terminationGracePeriodSeconds 50 | ps -ef | grep zoo.cfg | grep -v grep | awk '{print $2}' | xargs kill 51 | -------------------------------------------------------------------------------- /docker/zoo.cfg.swarm: -------------------------------------------------------------------------------- 1 | dataDir=/data 2 | dataLogDir=/datalog 3 | tickTime=2000 4 | initLimit=5 5 | syncLimit=2 6 | autopurge.snapRetainCount=3 7 | autopurge.purgeInterval=0 8 | maxClientCnxns=60 9 | standaloneEnabled=true 10 | admin.enableServer=true 11 | server.1=localhost:2888:3888;2181 12 | 4lw.commands.whitelist=cons, envi, conf, crst, srvr, stat, mntr, ruok 13 | -------------------------------------------------------------------------------- /docker/zu/build.gradle.kts: -------------------------------------------------------------------------------- 1 | import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar 2 | import org.jetbrains.kotlin.gradle.tasks.KotlinCompile 3 | 4 | plugins { 5 | kotlin("jvm") version "1.5.31" 6 | id("com.github.johnrengelman.shadow") version "7.1.0" 7 | } 8 | 9 | repositories { 10 | mavenCentral() 11 | } 12 | 13 | dependencies { 14 | implementation(kotlin("stdlib")) 15 | implementation("org.apache.zookeeper:zookeeper:3.9.3") 16 | } 17 | 18 | tasks.withType() { 19 | classifier = null 20 | manifest { 21 | attributes["Main-Class"] = "io.pravega.zookeeper.MainKt" 22 | } 23 | } 24 | 25 | tasks.withType { 26 | kotlinOptions { 27 | jvmTarget = "11" 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /docker/zu/gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/pravega/zookeeper-operator/462635bcc436dc9da7573119693743068ec3394d/docker/zu/gradle/wrapper/gradle-wrapper.jar 
-------------------------------------------------------------------------------- /docker/zu/gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.2-bin.zip 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | -------------------------------------------------------------------------------- /docker/zu/gradlew: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Copyright © 2015-2021 the original authors. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # https://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | # 18 | 19 | ############################################################################## 20 | # 21 | # Gradle start up script for POSIX generated by Gradle. 22 | # 23 | # Important for running: 24 | # 25 | # (1) You need a POSIX-compliant shell to run this script. 
If your /bin/sh is 26 | # noncompliant, but you have some other compliant shell such as ksh or 27 | # bash, then to run this script, type that shell name before the whole 28 | # command line, like: 29 | # 30 | # ksh Gradle 31 | # 32 | # Busybox and similar reduced shells will NOT work, because this script 33 | # requires all of these POSIX shell features: 34 | # * functions; 35 | # * expansions «$var», «${var}», «${var:-default}», «${var+SET}», 36 | # «${var#prefix}», «${var%suffix}», and «$( cmd )»; 37 | # * compound commands having a testable exit status, especially «case»; 38 | # * various built-in commands including «command», «set», and «ulimit». 39 | # 40 | # Important for patching: 41 | # 42 | # (2) This script targets any POSIX shell, so it avoids extensions provided 43 | # by Bash, Ksh, etc; in particular arrays are avoided. 44 | # 45 | # The "traditional" practice of packing multiple parameters into a 46 | # space-separated string is a well documented source of bugs and security 47 | # problems, so this is (mostly) avoided, by progressively accumulating 48 | # options in "$@", and eventually passing that to Java. 49 | # 50 | # Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, 51 | # and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; 52 | # see the in-line comments for details. 53 | # 54 | # There are tweaks for specific operating systems such as AIX, CygWin, 55 | # Darwin, MinGW, and NonStop. 56 | # 57 | # (3) This script is generated from the Groovy template 58 | # https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt 59 | # within the Gradle project. 60 | # 61 | # You can find Gradle at https://github.com/gradle/gradle/. 
62 | # 63 | ############################################################################## 64 | 65 | # Attempt to set APP_HOME 66 | 67 | # Resolve links: $0 may be a link 68 | app_path=$0 69 | 70 | # Need this for daisy-chained symlinks. 71 | while 72 | APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path 73 | [ -h "$app_path" ] 74 | do 75 | ls=$( ls -ld "$app_path" ) 76 | link=${ls#*' -> '} 77 | case $link in #( 78 | /*) app_path=$link ;; #( 79 | *) app_path=$APP_HOME$link ;; 80 | esac 81 | done 82 | 83 | APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit 84 | 85 | APP_NAME="Gradle" 86 | APP_BASE_NAME=${0##*/} 87 | 88 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 89 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' 90 | 91 | # Use the maximum available, or set MAX_FD != -1 to use that value. 92 | MAX_FD=maximum 93 | 94 | warn () { 95 | echo "$*" 96 | } >&2 97 | 98 | die () { 99 | echo 100 | echo "$*" 101 | echo 102 | exit 1 103 | } >&2 104 | 105 | # OS specific support (must be 'true' or 'false'). 106 | cygwin=false 107 | msys=false 108 | darwin=false 109 | nonstop=false 110 | case "$( uname )" in #( 111 | CYGWIN* ) cygwin=true ;; #( 112 | Darwin* ) darwin=true ;; #( 113 | MSYS* | MINGW* ) msys=true ;; #( 114 | NONSTOP* ) nonstop=true ;; 115 | esac 116 | 117 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar 118 | 119 | 120 | # Determine the Java command to use to start the JVM. 121 | if [ -n "$JAVA_HOME" ] ; then 122 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then 123 | # IBM's JDK on AIX uses strange locations for the executables 124 | JAVACMD=$JAVA_HOME/jre/sh/java 125 | else 126 | JAVACMD=$JAVA_HOME/bin/java 127 | fi 128 | if [ ! -x "$JAVACMD" ] ; then 129 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME 130 | 131 | Please set the JAVA_HOME variable in your environment to match the 132 | location of your Java installation." 
133 | fi 134 | else 135 | JAVACMD=java 136 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 137 | 138 | Please set the JAVA_HOME variable in your environment to match the 139 | location of your Java installation." 140 | fi 141 | 142 | # Increase the maximum file descriptors if we can. 143 | if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then 144 | case $MAX_FD in #( 145 | max*) 146 | MAX_FD=$( ulimit -H -n ) || 147 | warn "Could not query maximum file descriptor limit" 148 | esac 149 | case $MAX_FD in #( 150 | '' | soft) :;; #( 151 | *) 152 | ulimit -n "$MAX_FD" || 153 | warn "Could not set maximum file descriptor limit to $MAX_FD" 154 | esac 155 | fi 156 | 157 | # Collect all arguments for the java command, stacking in reverse order: 158 | # * args from the command line 159 | # * the main class name 160 | # * -classpath 161 | # * -D...appname settings 162 | # * --module-path (only if needed) 163 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. 164 | 165 | # For Cygwin or MSYS, switch paths to Windows format before running java 166 | if "$cygwin" || "$msys" ; then 167 | APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) 168 | CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) 169 | 170 | JAVACMD=$( cygpath --unix "$JAVACMD" ) 171 | 172 | # Now convert the arguments - kludge to limit ourselves to /bin/sh 173 | for arg do 174 | if 175 | case $arg in #( 176 | -*) false ;; # don't mess with options #( 177 | /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath 178 | [ -e "$t" ] ;; #( 179 | *) false ;; 180 | esac 181 | then 182 | arg=$( cygpath --path --ignore --mixed "$arg" ) 183 | fi 184 | # Roll the args list around exactly as many times as the number of 185 | # args, so each arg winds up back in the position where it started, but 186 | # possibly modified. 
187 | # 188 | # NB: a `for` loop captures its iteration list before it begins, so 189 | # changing the positional parameters here affects neither the number of 190 | # iterations, nor the values presented in `arg`. 191 | shift # remove old arg 192 | set -- "$@" "$arg" # push replacement arg 193 | done 194 | fi 195 | 196 | # Collect all arguments for the java command; 197 | # * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of 198 | # shell script including quotes and variable substitutions, so put them in 199 | # double quotes to make sure that they get re-expanded; and 200 | # * put everything else in single quotes, so that it's not re-expanded. 201 | 202 | set -- \ 203 | "-Dorg.gradle.appname=$APP_BASE_NAME" \ 204 | -classpath "$CLASSPATH" \ 205 | org.gradle.wrapper.GradleWrapperMain \ 206 | "$@" 207 | 208 | # Use "xargs" to parse quoted args. 209 | # 210 | # With -n1 it outputs one arg per line, with the quotes and backslashes removed. 211 | # 212 | # In Bash we could simply go: 213 | # 214 | # readarray ARGS < <( xargs -n1 <<<"$var" ) && 215 | # set -- "${ARGS[@]}" "$@" 216 | # 217 | # but POSIX shell has neither arrays nor command substitution, so instead we 218 | # post-process each arg (as a line of input to sed) to backslash-escape any 219 | # character that might be a shell metacharacter, then use eval to reverse 220 | # that process (while maintaining the separation between arguments), and wrap 221 | # the whole thing up as a single "set" statement. 222 | # 223 | # This will of course break if any of these variables contains a newline or 224 | # an unmatched quote. 
225 | # 226 | 227 | eval "set -- $( 228 | printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | 229 | xargs -n1 | 230 | sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | 231 | tr '\n' ' ' 232 | )" '"$@"' 233 | 234 | exec "$JAVACMD" "$@" 235 | -------------------------------------------------------------------------------- /docker/zu/src/main/java/io/pravega/zookeeper/Main.kt: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package io.pravega.zookeeper 12 | 13 | import org.apache.zookeeper.data.Stat 14 | import org.apache.zookeeper.AsyncCallback.VoidCallback 15 | import java.io.File 16 | 17 | const val OBSERVER = "observer" 18 | const val PARTICIPANT= "participant" 19 | 20 | /** 21 | * Utility to Register a server with the Zookeeper Ensemble 22 | */ 23 | fun main(args: Array) { 24 | val message = "Usage: zu [options...]" 25 | if (args.isEmpty()) { 26 | help(message) 27 | } 28 | 29 | when { 30 | "add" == args[0] -> runAdd(args) 31 | "get-all" == args[0] -> runGetAll(args) 32 | "get" == args[0] -> runGet(args) 33 | "remove" == args[0] -> runRemove(args) 34 | "get-role" == args[0] -> runGetRole(args) 35 | "sync" == args[0] -> runSync(args) 36 | else -> help(message) 37 | } 38 | } 39 | 40 | fun runSync(args: Array, suppressOutput: Boolean = false): String { 41 | if (args.size < 3) { 42 | help("Usage: zu sync ") 43 | } 44 | var (_, zkUrl, path) = args 45 | return try { 46 | val zk = newZookeeperAdminClient(zkUrl) 47 | zk.sync(path, null, null) 48 | val dataArr = zk.getData(path, null, null) 49 | val clusterSize = String(dataArr).substringAfter("=").trim() 50 | if (! 
suppressOutput) { 51 | print(clusterSize) 52 | } 53 | clusterSize 54 | } catch (e: Exception) { 55 | System.err.println("Error performing zookeeper sync operation:") 56 | e.printStackTrace(System.err) 57 | System.exit(1) 58 | "" 59 | } 60 | } 61 | 62 | fun runGetAll(args: Array, suppressOutput: Boolean = false): String { 63 | if (args.size != 2) { 64 | help("Usage: zu get-all ") 65 | } 66 | 67 | val (_, zkUrl) = args 68 | return try { 69 | val zk = newZookeeperAdminClient(zkUrl) 70 | val zkCfg = String(zk.getConfig(false, Stat())) 71 | if (! suppressOutput) { 72 | print(zkCfg) 73 | } 74 | zkCfg 75 | } catch (e: Exception) { 76 | System.err.println("Error getting server config") 77 | e.printStackTrace(System.err) 78 | System.exit(1) 79 | "" 80 | } 81 | } 82 | 83 | fun runGet(args: Array, suppressOutput: Boolean = false): String? { 84 | if (args.size != 3) { 85 | help("Usage: zu get ") 86 | } 87 | 88 | val (_, zkUrl, serverId) = args 89 | val that = runGetAll(arrayOf("get-all", zkUrl), true) 90 | .split("\n") 91 | .map {it.split("=")} 92 | .filter { it[0] != "version" } 93 | .find{ it[0].split(".")[1] == serverId } 94 | ?.getOrNull(1) 95 | when { 96 | that == null -> { 97 | System.err.println("Server not found in zookeeper config") 98 | System.exit(1) 99 | } 100 | ! suppressOutput -> { 101 | println(that.toString()) 102 | } 103 | } 104 | return that.toString() 105 | } 106 | 107 | fun runGetRole(args: Array) { 108 | if (args.size != 3) { 109 | help("Usage: zu get-role ") 110 | } 111 | val cfgStr = runGet(args, true) 112 | if(Regex(".*observer.*") matches cfgStr.toString()) { 113 | println(OBSERVER) 114 | } else { 115 | println(PARTICIPANT) 116 | } 117 | } 118 | 119 | fun reconfigure(zkUrl: String, joining: String?, leaving: String?, outputFile: String?) 
/**
 * Removes a server from the Zookeeper ensemble via dynamic reconfiguration.
 *
 * args layout: [0]="remove", [1]=zookeeper-url, [2]=server-id, [3]=optional output file.
 */
fun runRemove(args: Array<String>) {
    val message = "Usage: zu remove <zookeeper-url> <server-id> [output-file]"
    if (args.size < 3 || args.size > 4) {
        help(message)
    }
    val (_, zkUrl, serverId) = args
    // BUG FIX: the optional output file is args[3]; the previous code passed
    // args.getOrNull(2), which is the server id itself, so the reconfigured
    // config was never written to the requested file (cf. runAdd, which
    // correctly uses getOrNull(4)).
    reconfigure(zkUrl, null, serverId, args.getOrNull(3))
}
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package io.pravega.zookeeper 12 | 13 | import org.apache.zookeeper.WatchedEvent 14 | import org.apache.zookeeper.Watcher 15 | import org.apache.zookeeper.ZooKeeper 16 | import org.apache.zookeeper.admin.ZooKeeperAdmin 17 | import java.util.concurrent.CompletableFuture 18 | import java.util.concurrent.TimeUnit 19 | 20 | const val ZK_CONNECTION_TIMEOUT_MINS: Long = 3 21 | 22 | /** 23 | * Creates a new Zookeeper Admin client and waits until it's in a connected state 24 | */ 25 | fun newZookeeperAdminClient(zkUrl: String) : ZooKeeperAdmin { 26 | System.err.println("Connecting to Zookeeper $zkUrl") 27 | 28 | val connectionWatcher = ConnectionWatcher() 29 | val zk = ZooKeeperAdmin(zkUrl, 3000, connectionWatcher) 30 | 31 | connectionWatcher.waitUntilConnected() 32 | 33 | return zk 34 | } 35 | 36 | class ConnectionWatcher : Watcher { 37 | private val connected = CompletableFuture() 38 | 39 | override fun process(event: WatchedEvent?) 
{ 40 | if (event?.type == Watcher.Event.EventType.None) { 41 | if (event.state == Watcher.Event.KeeperState.SyncConnected) { 42 | connected.complete(true) 43 | } 44 | } 45 | } 46 | 47 | fun waitUntilConnected() { 48 | connected.get(ZK_CONNECTION_TIMEOUT_MINS, TimeUnit.MINUTES) 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/pravega/zookeeper-operator 2 | 3 | go 1.21 4 | 5 | require ( 6 | github.com/ghodss/yaml v1.0.0 7 | github.com/go-logr/logr v1.2.4 8 | github.com/onsi/ginkgo v1.16.5 9 | github.com/onsi/gomega v1.27.7 10 | github.com/operator-framework/operator-lib v0.11.0 11 | github.com/pkg/errors v0.9.1 12 | github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 13 | github.com/sirupsen/logrus v1.9.0 14 | golang.org/x/net v0.17.0 15 | k8s.io/api v0.27.5 16 | k8s.io/apimachinery v0.27.5 17 | k8s.io/client-go v0.27.5 18 | sigs.k8s.io/controller-runtime v0.15.2 19 | ) 20 | 21 | require ( 22 | github.com/beorn7/perks v1.0.1 // indirect 23 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 24 | github.com/davecgh/go-spew v1.1.1 // indirect 25 | github.com/emicklei/go-restful/v3 v3.9.0 // indirect 26 | github.com/evanphx/json-patch v5.6.0+incompatible // indirect 27 | github.com/evanphx/json-patch/v5 v5.6.0 // indirect 28 | github.com/fsnotify/fsnotify v1.6.0 // indirect 29 | github.com/go-logr/zapr v1.2.4 // indirect 30 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 31 | github.com/go-openapi/jsonreference v0.20.1 // indirect 32 | github.com/go-openapi/swag v0.22.3 // indirect 33 | github.com/gogo/protobuf v1.3.2 // indirect 34 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 35 | github.com/golang/protobuf v1.5.3 // indirect 36 | github.com/google/gnostic v0.6.9 // indirect 37 | github.com/google/go-cmp v0.5.9 // indirect 38 | github.com/google/gofuzz 
v1.2.0 // indirect 39 | github.com/google/uuid v1.3.0 // indirect 40 | github.com/imdario/mergo v0.3.13 // indirect 41 | github.com/josharian/intern v1.0.0 // indirect 42 | github.com/json-iterator/go v1.1.12 // indirect 43 | github.com/mailru/easyjson v0.7.7 // indirect 44 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 45 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 46 | github.com/modern-go/reflect2 v1.0.2 // indirect 47 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 48 | github.com/nxadm/tail v1.4.8 // indirect 49 | github.com/prometheus/client_golang v1.15.1 // indirect 50 | github.com/prometheus/client_model v0.4.0 // indirect 51 | github.com/prometheus/common v0.42.0 // indirect 52 | github.com/prometheus/procfs v0.9.0 // indirect 53 | github.com/spf13/pflag v1.0.5 // indirect 54 | go.uber.org/atomic v1.9.0 // indirect 55 | go.uber.org/multierr v1.8.0 // indirect 56 | go.uber.org/zap v1.24.0 // indirect 57 | golang.org/x/oauth2 v0.5.0 // indirect 58 | golang.org/x/sys v0.13.0 // indirect 59 | golang.org/x/term v0.13.0 // indirect 60 | golang.org/x/text v0.13.0 // indirect 61 | golang.org/x/time v0.3.0 // indirect 62 | gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect 63 | google.golang.org/appengine v1.6.7 // indirect 64 | google.golang.org/protobuf v1.33.0 // indirect 65 | gopkg.in/inf.v0 v0.9.1 // indirect 66 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 67 | gopkg.in/yaml.v2 v2.4.0 // indirect 68 | gopkg.in/yaml.v3 v3.0.1 // indirect 69 | k8s.io/apiextensions-apiserver v0.27.5 // indirect 70 | k8s.io/component-base v0.27.5 // indirect 71 | k8s.io/klog/v2 v2.90.1 // indirect 72 | k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect 73 | k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect 74 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 75 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 76 | 
sigs.k8s.io/yaml v1.3.0 // indirect 77 | ) 78 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (&the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package main 12 | 13 | import ( 14 | "context" 15 | "errors" 16 | "flag" 17 | "fmt" 18 | "os" 19 | "runtime" 20 | "strings" 21 | 22 | zkConfig "github.com/pravega/zookeeper-operator/pkg/controller/config" 23 | "github.com/pravega/zookeeper-operator/pkg/utils" 24 | "github.com/pravega/zookeeper-operator/pkg/version" 25 | zkClient "github.com/pravega/zookeeper-operator/pkg/zk" 26 | "github.com/sirupsen/logrus" 27 | apimachineryruntime "k8s.io/apimachinery/pkg/runtime" 28 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 29 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 30 | _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" 31 | _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" 32 | ctrl "sigs.k8s.io/controller-runtime" 33 | "sigs.k8s.io/controller-runtime/pkg/cache" 34 | "sigs.k8s.io/controller-runtime/pkg/client/config" 35 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 36 | 37 | api "github.com/pravega/zookeeper-operator/api/v1beta1" 38 | "github.com/pravega/zookeeper-operator/controllers" 39 | // +kubebuilder:scaffold:imports 40 | ) 41 | 42 | var ( 43 | log = ctrl.Log.WithName("cmd") 44 | versionFlag bool 45 | scheme = apimachineryruntime.NewScheme() 46 | ) 47 | 48 | func init() { 49 | flag.BoolVar(&versionFlag, "version", false, "Show version and quit") 50 | flag.BoolVar(&zkConfig.DisableFinalizer, "disableFinalizer", false, 51 | "Disable finalizers for zookeeperclusters. 
Use this flag with awareness of the consequences") 52 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 53 | utilruntime.Must(api.AddToScheme(scheme)) 54 | } 55 | 56 | func printVersion() { 57 | log.Info(fmt.Sprintf("zookeeper-operator Version: %v", version.Version)) 58 | log.Info(fmt.Sprintf("Git SHA: %s", version.GitSHA)) 59 | log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) 60 | log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) 61 | } 62 | 63 | func main() { 64 | var metricsAddr string 65 | flag.StringVar(&metricsAddr, "metrics-bind-address", "127.0.0.1:6000", "The address the metric endpoint binds to.") 66 | flag.Parse() 67 | 68 | ctrl.SetLogger(zap.New(zap.UseDevMode(false))) 69 | 70 | namespaces, err := getWatchNamespace() 71 | if err != nil { 72 | log.Error(err, "unable to get WatchNamespace, "+ 73 | "the manager will watch and manage resources in all namespaces") 74 | } 75 | 76 | printVersion() 77 | 78 | if versionFlag { 79 | os.Exit(0) 80 | } 81 | 82 | if zkConfig.DisableFinalizer { 83 | logrus.Warn("----- Running with finalizer disabled. -----") 84 | } 85 | 86 | //When operator is started to watch resources in a specific set of namespaces, we use the MultiNamespacedCacheBuilder cache. 87 | //In this scenario, it is also suggested to restrict the provided authorization to this namespace by replacing the default 88 | //ClusterRole and ClusterRoleBinding to Role and RoleBinding respectively 89 | //For further information see the kubernetes documentation about 90 | //Using [RBAC Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/). 
91 | managerNamespaces := []string{} 92 | if namespaces != "" { 93 | ns := strings.Split(namespaces, ",") 94 | for i := range ns { 95 | ns[i] = strings.TrimSpace(ns[i]) 96 | } 97 | managerNamespaces = ns 98 | } 99 | 100 | // Get a config to talk to the apiserver 101 | cfg, err := config.GetConfig() 102 | if err != nil { 103 | logrus.Fatal(err) 104 | } 105 | 106 | operatorNs, err := GetOperatorNamespace() 107 | if err != nil { 108 | log.Error(err, "failed to get operator namespace") 109 | os.Exit(1) 110 | } 111 | 112 | // Become the leader before proceeding 113 | err = utils.BecomeLeader(context.TODO(), cfg, "zookeeper-operator-lock", operatorNs) 114 | if err != nil { 115 | log.Error(err, "") 116 | os.Exit(1) 117 | } 118 | 119 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 120 | Scheme: scheme, 121 | Cache: cache.Options{Namespaces: managerNamespaces}, 122 | MetricsBindAddress: metricsAddr, 123 | }) 124 | if err != nil { 125 | log.Error(err, "unable to start manager") 126 | os.Exit(1) 127 | } 128 | 129 | if err = (&controllers.ZookeeperClusterReconciler{ 130 | Client: mgr.GetClient(), 131 | Log: ctrl.Log.WithName("controllers").WithName("ZookeeperCluster"), 132 | Scheme: mgr.GetScheme(), 133 | ZkClient: new(zkClient.DefaultZookeeperClient), 134 | }).SetupWithManager(mgr); err != nil { 135 | log.Error(err, "unable to create controller", "controller", "ZookeeperCluster") 136 | os.Exit(1) 137 | } 138 | // +kubebuilder:scaffold:builder 139 | 140 | log.Info("starting manager") 141 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 142 | log.Error(err, "problem running manager") 143 | os.Exit(1) 144 | } 145 | } 146 | 147 | // getWatchNamespace returns the Namespace the operator should be watching for changes 148 | func getWatchNamespace() (string, error) { 149 | // WatchNamespaceEnvVar is the constant for env variable WATCH_NAMESPACE 150 | // which specifies the Namespace to watch. 
151 | // An empty value means the operator is running with cluster scope. 152 | var watchNamespaceEnvVar = "WATCH_NAMESPACE" 153 | 154 | ns, found := os.LookupEnv(watchNamespaceEnvVar) 155 | if !found { 156 | return "", fmt.Errorf("%s must be set", watchNamespaceEnvVar) 157 | } 158 | return ns, nil 159 | } 160 | 161 | func GetOperatorNamespace() (string, error) { 162 | nsBytes, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") 163 | if err != nil { 164 | if os.IsNotExist(err) { 165 | return "", errors.New("file does not exist") 166 | } 167 | return "", err 168 | } 169 | ns := strings.TrimSpace(string(nsBytes)) 170 | return ns, nil 171 | } 172 | -------------------------------------------------------------------------------- /pkg/controller/config/config.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package config 12 | 13 | // DisableFinalizer disables the finalizers for zookeeper clusters and 14 | // skips the pvc deletion phase when zookeeper cluster get deleted. 15 | // This is useful when operator deletion may happen before zookeeper clusters deletion. 16 | // NOTE: enabling this flag with caution! It causes pvc of zk undeleted. 17 | var DisableFinalizer bool 18 | -------------------------------------------------------------------------------- /pkg/test/e2e/e2eutil/spec_util.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2eutil 12 | 13 | import ( 14 | api "github.com/pravega/zookeeper-operator/api/v1beta1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | ) 17 | 18 | // NewDefaultCluster returns a cluster with an empty spec, which will be filled 19 | // with default values 20 | func NewDefaultCluster(namespace string) *api.ZookeeperCluster { 21 | return &api.ZookeeperCluster{ 22 | TypeMeta: metav1.TypeMeta{ 23 | Kind: "ZookeeperCluster", 24 | APIVersion: "zookeeper.pravega.io/v1beta1", 25 | }, 26 | ObjectMeta: metav1.ObjectMeta{ 27 | Name: "zookeeper", 28 | Namespace: namespace, 29 | }, 30 | Spec: api.ZookeeperClusterSpec{}, 31 | } 32 | } 33 | 34 | func NewClusterWithVersion(namespace, version string) *api.ZookeeperCluster { 35 | cluster := NewDefaultCluster(namespace) 36 | cluster.Spec = api.ZookeeperClusterSpec{ 37 | Image: api.ContainerImage{ 38 | Tag: version, 39 | }, 40 | } 41 | return cluster 42 | } 43 | 44 | func NewClusterWithEmptyDir(namespace string) *api.ZookeeperCluster { 45 | cluster := NewDefaultCluster(namespace) 46 | cluster.Spec = api.ZookeeperClusterSpec{ 47 | StorageType: "ephemeral", 48 | } 49 | return cluster 50 | } 51 | -------------------------------------------------------------------------------- /pkg/test/e2e/e2eutil/zookeepercluster_util.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2eutil 12 | 13 | import ( 14 | goctx "context" 15 | "fmt" 16 | "golang.org/x/net/context" 17 | "time" 18 | 19 | "github.com/go-logr/logr" 20 | corev1 "k8s.io/api/core/v1" 21 | "k8s.io/apimachinery/pkg/labels" 22 | "sigs.k8s.io/controller-runtime/pkg/client" 23 | 24 | "k8s.io/apimachinery/pkg/types" 25 | "k8s.io/apimachinery/pkg/util/wait" 26 | 27 | api "github.com/pravega/zookeeper-operator/api/v1beta1" 28 | ) 29 | 30 | var ( 31 | RetryInterval = time.Second * 15 32 | ReadyTimeout = time.Minute * 15 33 | UpgradeTimeout = time.Minute * 25 34 | TerminateTimeout = time.Minute * 15 35 | ) 36 | 37 | // CreateCluster creates a ZookeeperCluster CR with the desired spec 38 | func CreateCluster(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster) (*api.ZookeeperCluster, error) { 39 | logger.Info(fmt.Sprintf("creating zookeeper cluster: %s\n", z.Name)) 40 | err := k8client.Create(goctx.TODO(), z) 41 | if err != nil { 42 | return nil, fmt.Errorf("failed to create CR: %v", err) 43 | } 44 | 45 | zk := &api.ZookeeperCluster{} 46 | err = k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zk) 47 | if err != nil { 48 | return nil, fmt.Errorf("failed to obtain created CR: %v", err) 49 | } 50 | logger.Info(fmt.Sprintf("created zookeeper cluster: %s\n", zk.Name)) 51 | return z, nil 52 | } 53 | 54 | // DeleteCluster deletes the ZookeeperCluster CR specified by cluster spec 55 | func DeleteCluster(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster) error { 56 | logger.Info(fmt.Sprintf("deleting zookeeper cluster: %s", z.Name)) 57 | err := k8client.Delete(goctx.TODO(), z) 58 | if err != nil { 59 | return fmt.Errorf("failed to delete CR: %v", err) 60 | } 61 | logger.Info(fmt.Sprintf("deleted zookeeper cluster: %s", z.Name)) 62 | return nil 63 | } 64 | 65 | // UpdateCluster updates the 
ZookeeperCluster CR 66 | func UpdateCluster(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster) error { 67 | logger.Info(fmt.Sprintf("updating zookeeper cluster: %s", z.Name)) 68 | err := k8client.Update(goctx.TODO(), z) 69 | if err != nil { 70 | return fmt.Errorf("failed to update CR: %v", err) 71 | } 72 | logger.Info(fmt.Sprintf("updated zookeeper cluster: %s", z.Name)) 73 | return nil 74 | } 75 | 76 | // GetCluster returns the latest ZookeeperCluster CR 77 | func GetCluster(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster) (*api.ZookeeperCluster, error) { 78 | zk := &api.ZookeeperCluster{} 79 | err := k8client.Get(goctx.TODO(), types.NamespacedName{Namespace: z.Namespace, Name: z.Name}, zk) 80 | if err != nil { 81 | return nil, fmt.Errorf("failed to obtain created CR: %v", err) 82 | } 83 | logger.Info(fmt.Sprintf("zk cluster has ready replicas %v", zk.Status.ReadyReplicas)) 84 | return zk, nil 85 | } 86 | 87 | // WaitForClusterToBecomeReady will wait until all cluster pods are ready 88 | func WaitForClusterToBecomeReady(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster, size int) error { 89 | logger.Info(fmt.Sprintf("waiting for cluster pods to become ready: %s", z.Name)) 90 | err := wait.PollUntilContextTimeout(context.TODO(), RetryInterval, ReadyTimeout, false, func(ctx context.Context) (done bool, err error) { 91 | cluster, err := GetCluster(logger, k8client, z) 92 | if err != nil { 93 | return false, err 94 | } 95 | 96 | logger.Info(fmt.Sprintf("waiting for pods to become ready (%d/%d), pods (%v)", cluster.Status.ReadyReplicas, size, cluster.Status.Members.Ready)) 97 | _, condition := cluster.Status.GetClusterCondition(api.ClusterConditionPodsReady) 98 | if condition != nil && condition.Status == corev1.ConditionTrue && cluster.Status.ReadyReplicas == int32(size) { 99 | return true, nil 100 | } 101 | return false, nil 102 | }) 103 | 104 | if err != nil { 105 | return err 106 | } 107 | 
logger.Info(fmt.Sprintf("zookeeper cluster ready: %s", z.Name)) 108 | return nil 109 | 110 | } 111 | 112 | // WaitForClusterToUpgrade will wait until all pods are upgraded 113 | func WaitForClusterToUpgrade(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster, targetVersion string) error { 114 | logger.Info(fmt.Sprintf("waiting for cluster to upgrade: %s", z.Name)) 115 | err := wait.PollUntilContextTimeout(context.TODO(), RetryInterval, UpgradeTimeout, false, func(ctx context.Context) (done bool, err error) { 116 | cluster, err := GetCluster(logger, k8client, z) 117 | if err != nil { 118 | return false, err 119 | } 120 | 121 | _, upgradeCondition := cluster.Status.GetClusterCondition(api.ClusterConditionUpgrading) 122 | _, errorCondition := cluster.Status.GetClusterCondition(api.ClusterConditionError) 123 | 124 | logger.Info(fmt.Sprintf("waiting for cluster to upgrade (upgrading: %s; error: %s)", upgradeCondition.Status, errorCondition.Status)) 125 | 126 | if errorCondition.Status == corev1.ConditionTrue { 127 | return false, fmt.Errorf("failed upgrading cluster: [%s] %s", errorCondition.Reason, errorCondition.Message) 128 | } 129 | 130 | if upgradeCondition.Status == corev1.ConditionFalse && cluster.Status.CurrentVersion == targetVersion { 131 | // Cluster upgraded 132 | return true, nil 133 | } 134 | return false, nil 135 | }) 136 | 137 | if err != nil { 138 | return err 139 | } 140 | 141 | logger.Info(fmt.Sprintf("zookeeper cluster upgraded: %s", z.Name)) 142 | return nil 143 | } 144 | 145 | // WaitForClusterToTerminate will wait until all cluster pods are terminated 146 | func WaitForClusterToTerminate(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster) error { 147 | logger.Info(fmt.Sprintf("waiting for zookeeper cluster to terminate: %s", z.Name)) 148 | listOptions := []client.ListOption{ 149 | client.InNamespace(z.GetNamespace()), 150 | client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(map[string]string{"app": 
z.GetName()})}, 151 | } 152 | 153 | // Wait for Pods to terminate 154 | err := wait.PollUntilContextTimeout(context.TODO(), RetryInterval, TerminateTimeout, false, func(ctx context.Context) (done bool, err error) { 155 | podList := corev1.PodList{} 156 | err = k8client.List(goctx.TODO(), &podList, listOptions...) 157 | if err != nil { 158 | return false, err 159 | } 160 | 161 | var names []string 162 | for i := range podList.Items { 163 | pod := &podList.Items[i] 164 | names = append(names, pod.Name) 165 | } 166 | logger.Info(fmt.Sprintf("waiting for pods to terminate, running pods (%v)", names)) 167 | if len(names) != 0 { 168 | return false, nil 169 | } 170 | return true, nil 171 | }) 172 | 173 | if err != nil { 174 | return err 175 | } 176 | 177 | // Wait for PVCs to terminate 178 | err = wait.PollUntilContextTimeout(context.TODO(), RetryInterval, TerminateTimeout, false, func(ctx context.Context) (done bool, err error) { 179 | pvcList := corev1.PersistentVolumeClaimList{} 180 | err = k8client.List(goctx.TODO(), &pvcList, listOptions...) 181 | if err != nil { 182 | return false, err 183 | } 184 | 185 | var names []string 186 | for i := range pvcList.Items { 187 | pvc := &pvcList.Items[i] 188 | names = append(names, pvc.Name) 189 | } 190 | logger.Info(fmt.Sprintf("waiting for pvc to terminate (%v)", names)) 191 | if len(names) != 0 { 192 | return false, nil 193 | } 194 | return true, nil 195 | 196 | }) 197 | 198 | if err != nil { 199 | return err 200 | } 201 | 202 | logger.Info(fmt.Sprintf("zookeeper cluster terminated: %s", z.Name)) 203 | return nil 204 | } 205 | func DeletePods(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster, size int) error { 206 | listOptions := []client.ListOption{ 207 | client.InNamespace(z.GetNamespace()), 208 | client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(map[string]string{"app": z.GetName()})}, 209 | } 210 | podList := corev1.PodList{} 211 | err := k8client.List(goctx.TODO(), &podList, listOptions...) 
212 | if err != nil { 213 | return err 214 | } 215 | pod := &corev1.Pod{} 216 | 217 | for i := 0; i < size; i++ { 218 | pod = &podList.Items[i] 219 | logger.Info(fmt.Sprintf("podname: %v", pod.Name)) 220 | err = k8client.Delete(goctx.TODO(), pod) 221 | if err != nil { 222 | return fmt.Errorf("failed to delete pod: %v", err) 223 | } 224 | 225 | logger.Info(fmt.Sprintf("deleted zookeeper pod: %s", pod.Name)) 226 | 227 | } 228 | return nil 229 | } 230 | func GetPods(k8client client.Client, z *api.ZookeeperCluster) (*corev1.PodList, error) { 231 | listOptions := []client.ListOption{ 232 | client.InNamespace(z.GetNamespace()), 233 | client.MatchingLabels(map[string]string{"app": z.GetName()}), 234 | } 235 | podList := corev1.PodList{} 236 | err := k8client.List(goctx.TODO(), &podList, listOptions...) 237 | return &podList, err 238 | } 239 | func CheckAdminService(logger logr.Logger, k8client client.Client, z *api.ZookeeperCluster) error { 240 | serviceList := corev1.ServiceList{} 241 | listOptions := []client.ListOption{client.InNamespace(z.GetNamespace()), 242 | client.MatchingLabelsSelector{Selector: labels.SelectorFromSet(map[string]string{"app": z.GetName()})}} 243 | err := k8client.List(goctx.TODO(), &serviceList, listOptions...) 244 | if err != nil { 245 | return err 246 | } 247 | 248 | for _, sn := range serviceList.Items { 249 | if sn.Name == "zookeeper-admin-server" { 250 | logger.Info(fmt.Sprintf("Admin service is enabled servicenameis %v", sn.Name)) 251 | return nil 252 | } 253 | } 254 | return fmt.Errorf("Admin Service is not enabled") 255 | } 256 | -------------------------------------------------------------------------------- /pkg/utils/finalizer_utils.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package utils 12 | 13 | import ( 14 | "strconv" 15 | "strings" 16 | ) 17 | 18 | const ( 19 | ZkFinalizer = "cleanUpZookeeperPVC" 20 | ) 21 | 22 | func ContainsString(slice []string, str string) bool { 23 | for _, item := range slice { 24 | if item == str { 25 | return true 26 | } 27 | } 28 | return false 29 | } 30 | 31 | func RemoveString(slice []string, str string) (result []string) { 32 | for _, item := range slice { 33 | if item == str { 34 | continue 35 | } 36 | result = append(result, item) 37 | } 38 | return result 39 | } 40 | 41 | func IsPVCOrphan(zkPvcName string, replicas int32) bool { 42 | index := strings.LastIndexAny(zkPvcName, "-") 43 | if index == -1 { 44 | return false 45 | } 46 | 47 | ordinal, err := strconv.Atoi(zkPvcName[index+1:]) 48 | if err != nil { 49 | return false 50 | } 51 | 52 | return int32(ordinal) >= replicas 53 | } 54 | -------------------------------------------------------------------------------- /pkg/utils/finalizer_utils_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | package utils 11 | 12 | import ( 13 | . "github.com/onsi/ginkgo" 14 | . 
"github.com/onsi/gomega" 15 | ) 16 | 17 | var _ = Describe("Zookeeper Finalizers", func() { 18 | 19 | Context("creating strSlice", func() { 20 | var val1, val2 bool 21 | BeforeEach(func() { 22 | var strSlice = []string{"10", "20"} 23 | val1 = ContainsString(strSlice, "10") 24 | val2 = ContainsString(strSlice, "30") 25 | }) 26 | It("should return true for value 10", func() { 27 | Ω(val1).To(Equal(true)) 28 | }) 29 | It("should return false for value 30", func() { 30 | Ω(val2).To(Equal(false)) 31 | }) 32 | }) 33 | 34 | Context("creating strSlice", func() { 35 | var result []string 36 | BeforeEach(func() { 37 | var strSlice = []string{"10", "20"} 38 | result = RemoveString(strSlice, "10") 39 | }) 40 | It("should return false for value 10", func() { 41 | Ω(ContainsString(result, "10")).To(Equal(false)) 42 | }) 43 | }) 44 | Context("IsPvCOrphan", func() { 45 | var result1, result2, result3, result4 bool 46 | BeforeEach(func() { 47 | var zkPvcName string = "zk" 48 | result1 = IsPVCOrphan(zkPvcName, 3) 49 | zkPvcName = "zk-2" 50 | result2 = IsPVCOrphan(zkPvcName, 3) 51 | zkPvcName = "zk-5" 52 | result3 = IsPVCOrphan(zkPvcName, 3) 53 | zkPvcName = "zk-" 54 | result4 = IsPVCOrphan(zkPvcName, 3) 55 | 56 | }) 57 | It("should return false for result1", func() { 58 | Ω(result1).To(Equal(false)) 59 | }) 60 | It("should return false for result2", func() { 61 | Ω(result2).To(Equal(false)) 62 | }) 63 | It("should return true for result3", func() { 64 | Ω(result3).To(Equal(true)) 65 | }) 66 | It("should return false for result4", func() { 67 | Ω(result4).To(Equal(false)) 68 | }) 69 | }) 70 | }) 71 | -------------------------------------------------------------------------------- /pkg/utils/leader.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package utils 12 | 13 | import ( 14 | "context" 15 | "fmt" 16 | "os" 17 | 18 | "github.com/operator-framework/operator-lib/leader" 19 | log "github.com/sirupsen/logrus" 20 | corev1 "k8s.io/api/core/v1" 21 | apierrors "k8s.io/apimachinery/pkg/api/errors" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | "k8s.io/client-go/rest" 24 | k8sClient "sigs.k8s.io/controller-runtime/pkg/client" 25 | ) 26 | 27 | // BecomeLeader with pre-check cluster status - is there a previous pod in bad state? 28 | func BecomeLeader(ctx context.Context, cfg *rest.Config, lockName, namespace string) error { 29 | client, _ := k8sClient.New(cfg, k8sClient.Options{}) 30 | 31 | err := precheckLeaderLock(ctx, client, lockName, namespace) 32 | if err != nil { 33 | log.Printf("Error while pre-checking leader lock: %v", err) 34 | } 35 | 36 | // pre-checks done, proceed with SDK-provided election procedure 37 | err = leader.Become(ctx, lockName) 38 | return err 39 | } 40 | 41 | func precheckLeaderLock(ctx context.Context, client k8sClient.Client, lockName, ns string) error { 42 | existingConfigMap, e := getConfigMapWithLock(ctx, client, lockName, ns) 43 | if existingConfigMap == nil || e != nil { 44 | return e 45 | } 46 | 47 | currentPod := os.Getenv("POD_NAME") 48 | if currentPod == "" { 49 | return fmt.Errorf("required env POD_NAME not set") 50 | } 51 | 52 | log.Printf("Current pod name: %s", currentPod) 53 | 54 | for _, lockOwner := range existingConfigMap.GetOwnerReferences() { 55 | if lockOwner.Name == currentPod { 56 | log.Printf("Leader lock is owned by current pod - am I restarted?") 57 | return nil 58 | } 59 | log.Printf("Leader lock owner is %s %s", lockOwner.Kind, lockOwner.Name) 60 | e := checkupLeaderPodStatus(ctx, 
client, lockOwner, existingConfigMap, ns) 61 | if e != nil { 62 | return e 63 | } 64 | } 65 | 66 | return nil 67 | } 68 | 69 | // checkupLeaderPodStatus checks if leader pod status is marked with VMware-specific reason 'ProviderFailed' 70 | // then deletes lock and pod 71 | func checkupLeaderPodStatus(ctx context.Context, client k8sClient.Client, leaderRef metav1.OwnerReference, existingLock *corev1.ConfigMap, ns string) error { 72 | if leaderRef.Kind != "Pod" { 73 | log.Printf("Existing lock references non-pod object! Kind: %s", leaderRef.Kind) 74 | return nil 75 | } 76 | 77 | leaderPod := &corev1.Pod{} 78 | err := client.Get(ctx, k8sClient.ObjectKey{Namespace: ns, Name: leaderRef.Name}, leaderPod) 79 | if err != nil { 80 | if apierrors.IsNotFound(err) { 81 | log.Printf("Leader pod %s not found in namespace %s", leaderRef.Name, ns) 82 | return nil 83 | } 84 | log.Printf("Error while reading leader pod: %v", err) 85 | return err 86 | } 87 | 88 | log.Printf("Leader pod is in %s:%s status", leaderPod.Status.Phase, leaderPod.Status.Reason) 89 | 90 | if leaderPod.Status.Reason == "ProviderFailed" { 91 | log.Printf("Leader pod status reason is '%s' - deleting pod and lock config map to unblock leader election", leaderPod.Status.Reason) 92 | if err := deleteLeader(ctx, client, leaderPod, existingLock); err != nil { 93 | return err 94 | } 95 | } 96 | 97 | return nil 98 | } 99 | 100 | func getConfigMapWithLock(ctx context.Context, client k8sClient.Client, lockName, ns string) (*corev1.ConfigMap, error) { 101 | existingConfigMap := &corev1.ConfigMap{} 102 | e := client.Get(ctx, k8sClient.ObjectKey{Namespace: ns, Name: lockName}, existingConfigMap) 103 | if e != nil { 104 | if apierrors.IsNotFound(e) { 105 | log.Printf("Leader lock %s not found in namespace %s", lockName, ns) 106 | return nil, nil 107 | } 108 | log.Printf("Unknown error trying to get lock config map: %v", e) 109 | return nil, e 110 | } 111 | return existingConfigMap, nil 112 | } 113 | 114 | // deleteLeader 
tries to delete pod and config map 115 | func deleteLeader(ctx context.Context, client k8sClient.Client, leaderPod *corev1.Pod, configMapWithLock *corev1.ConfigMap) error { 116 | err := client.Delete(ctx, leaderPod) 117 | if err != nil { 118 | log.Printf("Error deleting leader pod %s: %v", leaderPod.Name, err) 119 | return err 120 | } 121 | 122 | err = client.Delete(ctx, configMapWithLock) 123 | switch { 124 | case apierrors.IsNotFound(err): 125 | log.Printf("Config map has already been deleted") 126 | return nil 127 | case err != nil: 128 | return err 129 | } 130 | 131 | return nil 132 | } 133 | -------------------------------------------------------------------------------- /pkg/utils/leader_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package utils 12 | 13 | import ( 14 | "context" 15 | "os" 16 | 17 | . "github.com/onsi/ginkgo" 18 | . 
"github.com/onsi/gomega" 19 | corev1 "k8s.io/api/core/v1" 20 | apierrors "k8s.io/apimachinery/pkg/api/errors" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | "k8s.io/apimachinery/pkg/runtime" 23 | clientscheme "k8s.io/client-go/kubernetes/scheme" 24 | k8sClient "sigs.k8s.io/controller-runtime/pkg/client" 25 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 26 | ) 27 | 28 | const ( 29 | configmapName = "test-map" 30 | namespace = "ns-1" 31 | currentPodName = "current-pod" 32 | otherPodName = "some-other-pod" 33 | ) 34 | 35 | var _ = Describe("Leader election utils", func() { 36 | Context("Election prechecks", func() { 37 | var ( 38 | client k8sClient.Client 39 | err error 40 | ctx context.Context 41 | lockConfigMap *corev1.ConfigMap 42 | currentPod *corev1.Pod 43 | otherPod *corev1.Pod 44 | ) 45 | BeforeEach(func() { 46 | currentPod = &corev1.Pod{ 47 | ObjectMeta: metav1.ObjectMeta{ 48 | Name: currentPodName, 49 | UID: "Uid-" + currentPodName, 50 | Namespace: namespace, 51 | }, 52 | } 53 | otherPod = &corev1.Pod{ 54 | ObjectMeta: metav1.ObjectMeta{ 55 | Name: otherPodName, 56 | UID: "Uid-" + otherPodName, 57 | Namespace: namespace, 58 | }, 59 | } 60 | _ = os.Setenv("POD_NAME", currentPodName) 61 | ctx = context.TODO() 62 | }) 63 | 64 | When("leader lock owned by current pod", func() { 65 | BeforeEach(func() { 66 | lockConfigMap = &corev1.ConfigMap{ 67 | TypeMeta: metav1.TypeMeta{ 68 | Kind: "ConfigMap", 69 | APIVersion: "v1", 70 | }, 71 | ObjectMeta: metav1.ObjectMeta{ 72 | Name: configmapName, 73 | Namespace: namespace, 74 | OwnerReferences: []metav1.OwnerReference{ 75 | {Name: currentPodName, Kind: "Pod"}, 76 | }, 77 | }, 78 | } 79 | 80 | client = fake.NewClientBuilder().WithScheme(clientscheme.Scheme).WithRuntimeObjects( 81 | []runtime.Object{currentPod, otherPod, lockConfigMap}...).Build() 82 | 83 | err = precheckLeaderLock(ctx, client, configmapName, namespace) 84 | }) 85 | It(" must do nothing", func() { 86 | Expect(err).ShouldNot(HaveOccurred()) 87 | 
}) 88 | }) 89 | 90 | When("leader lock owned by other pod", func() { 91 | BeforeEach(func() { 92 | lockConfigMap = &corev1.ConfigMap{ 93 | TypeMeta: metav1.TypeMeta{ 94 | Kind: "ConfigMap", 95 | APIVersion: "v1", 96 | }, 97 | ObjectMeta: metav1.ObjectMeta{ 98 | Name: configmapName, 99 | Namespace: namespace, 100 | OwnerReferences: []metav1.OwnerReference{ 101 | {Name: otherPodName, Kind: "Pod"}, 102 | }, 103 | }, 104 | } 105 | client = fake.NewClientBuilder().WithScheme(clientscheme.Scheme).WithRuntimeObjects([]runtime.Object{currentPod, otherPod, lockConfigMap}...).Build() 106 | err = precheckLeaderLock(ctx, client, configmapName, namespace) 107 | }) 108 | 109 | Context("when that node is Ready", func() { 110 | It(" must do nothing", func() { 111 | Expect(err).ShouldNot(HaveOccurred()) 112 | 113 | pod := &corev1.Pod{} 114 | err = client.Get(ctx, k8sClient.ObjectKey{Namespace: namespace, Name: otherPodName}, pod) 115 | Expect(err).Should(BeNil()) 116 | 117 | cm := &corev1.ConfigMap{} 118 | err = client.Get(ctx, k8sClient.ObjectKey{Namespace: namespace, Name: configmapName}, cm) 119 | Expect(err).Should(BeNil()) 120 | }) 121 | }) 122 | 123 | Context("when that node is in ProviderFailed state", func() { 124 | BeforeEach(func() { 125 | otherPod.Status.Reason = "ProviderFailed" 126 | _ = client.Update(ctx, otherPod) 127 | 128 | err = precheckLeaderLock(ctx, client, configmapName, namespace) 129 | }) 130 | It(" must delete otherPod and config map", func() { 131 | Expect(err).ShouldNot(HaveOccurred()) 132 | 133 | pod := &corev1.Pod{} 134 | err = client.Get(ctx, k8sClient.ObjectKey{Namespace: namespace, Name: otherPodName}, pod) 135 | Expect(err).ShouldNot(BeNil()) 136 | Expect(apierrors.IsNotFound(err)).To(BeTrue()) 137 | 138 | cm := &corev1.ConfigMap{} 139 | err = client.Get(ctx, k8sClient.ObjectKey{Namespace: namespace, Name: configmapName}, cm) 140 | Expect(err).ShouldNot(BeNil()) 141 | Expect(apierrors.IsNotFound(err)).To(BeTrue()) 142 | }) 143 | }) 144 | }) 145 | }) 
146 | }) 147 | -------------------------------------------------------------------------------- /pkg/utils/test_utils.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package utils 12 | 13 | import ( 14 | "fmt" 15 | 16 | corev1 "k8s.io/api/core/v1" 17 | ) 18 | 19 | // ServicePortByName returns a container port of name provided 20 | func ServicePortByName(ports []corev1.ServicePort, name string) (port corev1.ServicePort, err error) { 21 | for _, port := range ports { 22 | if port.Name == name { 23 | return port, nil 24 | } 25 | } 26 | return port, fmt.Errorf("port not found") 27 | } 28 | -------------------------------------------------------------------------------- /pkg/utils/test_utils_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | package utils 11 | 12 | import ( 13 | "fmt" 14 | 15 | . "github.com/onsi/ginkgo" 16 | . 
"github.com/onsi/gomega" 17 | "github.com/pravega/zookeeper-operator/api/v1beta1" 18 | "github.com/pravega/zookeeper-operator/pkg/zk" 19 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 20 | ) 21 | 22 | var _ = Describe("Zookeeper Test_Utils", func() { 23 | Context("with defaults", func() { 24 | var port string 25 | var err string 26 | BeforeEach(func() { 27 | z := &v1beta1.ZookeeperCluster{ 28 | ObjectMeta: metav1.ObjectMeta{ 29 | Name: "example", 30 | Namespace: "default", 31 | }, 32 | } 33 | z.WithDefaults() 34 | s := zk.MakeClientService(z) 35 | p, e := ServicePortByName(s.Spec.Ports, "temp") 36 | err = e.Error() 37 | p, e = ServicePortByName(s.Spec.Ports, "tcp-client") 38 | port = fmt.Sprintf("%v", p.Port) 39 | }) 40 | It("should return error port not found for temp", func() { 41 | Ω(err).To(Equal("port not found")) 42 | }) 43 | It("should set the serviceportbyname to 2181 for tcp-client", func() { 44 | Ω(port).To(Equal("2181")) 45 | }) 46 | }) 47 | }) 48 | -------------------------------------------------------------------------------- /pkg/utils/utils_suite_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2021 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package utils 12 | 13 | import ( 14 | "testing" 15 | 16 | . "github.com/onsi/ginkgo" 17 | . 
"github.com/onsi/gomega" 18 | ) 19 | 20 | func TestUtils(t *testing.T) { 21 | RegisterFailHandler(Fail) 22 | RunSpecs(t, "ZookeeperCluster Utils Tests") 23 | } 24 | -------------------------------------------------------------------------------- /pkg/utils/zookeeper_util.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package utils 12 | 13 | import ( 14 | "fmt" 15 | "strconv" 16 | 17 | v1beta1 "github.com/pravega/zookeeper-operator/api/v1beta1" 18 | corev1 "k8s.io/api/core/v1" 19 | ) 20 | 21 | const ( 22 | // Root ZNode for storing all zookeeper-operator related metadata. 23 | ZKMetaRoot = "/zookeeper-operator" 24 | ) 25 | 26 | func GetZkServiceUri(zoo *v1beta1.ZookeeperCluster) (zkUri string) { 27 | zkClientPort, _ := ContainerPortByName(zoo.Spec.Ports, "client") 28 | zkUri = zoo.GetClientServiceName() + "." + zoo.GetNamespace() + ".svc." 
+ zoo.GetKubernetesClusterDomain() + ":" + strconv.Itoa(int(zkClientPort)) 29 | return zkUri 30 | } 31 | 32 | func GetMetaPath(zoo *v1beta1.ZookeeperCluster) (path string) { 33 | return fmt.Sprintf("%s/%s", ZKMetaRoot, zoo.Name) 34 | } 35 | 36 | // ContainerPortByName returns a container port of name provided 37 | func ContainerPortByName(ports []corev1.ContainerPort, name string) (cPort int32, err error) { 38 | for _, port := range ports { 39 | if port.Name == name { 40 | return port.ContainerPort, nil 41 | } 42 | } 43 | return cPort, fmt.Errorf("port not found") 44 | } 45 | -------------------------------------------------------------------------------- /pkg/utils/zookeeper_util_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | package utils 11 | 12 | import ( 13 | "github.com/pravega/zookeeper-operator/api/v1beta1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | 16 | . "github.com/onsi/ginkgo" 17 | . 
"github.com/onsi/gomega" 18 | ) 19 | 20 | var _ = Describe("Zookeeper Utils", func() { 21 | 22 | Context("with defaults", func() { 23 | var zkuri, path, containerport string 24 | BeforeEach(func() { 25 | z := &v1beta1.ZookeeperCluster{ 26 | ObjectMeta: metav1.ObjectMeta{ 27 | Name: "example", 28 | Namespace: "default", 29 | }, 30 | } 31 | z.WithDefaults() 32 | zkuri = GetZkServiceUri(z) 33 | path = GetMetaPath(z) 34 | _, err := ContainerPortByName(z.Spec.Ports, "cl") 35 | if err != nil { 36 | containerport = err.Error() 37 | } 38 | }) 39 | It("should set the zkuri", func() { 40 | Ω(zkuri).To(Equal("example-client.default.svc.cluster.local:2181")) 41 | }) 42 | It("should set the path", func() { 43 | Ω(path).To(Equal("/zookeeper-operator/example")) 44 | }) 45 | It("should give error message", func() { 46 | Ω(containerport).To(Equal("port not found")) 47 | }) 48 | }) 49 | }) 50 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package version 12 | 13 | // Version represents the software version of the Zookeeper Operator 14 | var Version = "0.2.1" 15 | 16 | // GitSHA represents the Git commit hash in short format 17 | var GitSHA string 18 | -------------------------------------------------------------------------------- /pkg/yamlexporter/exporterutil_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package yamlexporter 12 | 13 | import ( 14 | "os" 15 | "testing" 16 | 17 | "github.com/pravega/zookeeper-operator/api/v1beta1" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | 20 | . "github.com/onsi/ginkgo" 21 | . "github.com/onsi/gomega" 22 | ) 23 | 24 | func TestExporter(t *testing.T) { 25 | RegisterFailHandler(Fail) 26 | RunSpecs(t, "ZookeeperCluster yamlExporter") 27 | } 28 | 29 | var _ = Describe("ZookeeperCluster yamlExporter", func() { 30 | Context("with defaults", func() { 31 | var err, err2, err3, err4 error 32 | BeforeEach(func() { 33 | z1 := &v1beta1.ZookeeperCluster{ 34 | ObjectMeta: metav1.ObjectMeta{ 35 | Name: "example", 36 | Namespace: "default", 37 | }, 38 | } 39 | z1.WithDefaults() 40 | err = CreateYAMLOutputDir("test") 41 | _, err2 = ReadInputClusterYAMLFile("test") 42 | err3 = GenerateOutputYAMLFile("test", "test", z1.GetObjectMeta()) 43 | _, err4 = CreateOutputSubDir(z1.GetName(), "test") 44 | _ = os.RemoveAll("test") 45 | _ = os.RemoveAll("example") 46 | }) 47 | It("Err should be nil", func() { 48 | Ω(err).To(BeNil()) 49 | }) 50 | It("Err2 should give test: is a directory", func() { 51 | Ω(err2.Error()).To(Equal("read test: is a directory")) 52 | }) 53 | It("Err3 should be nil", func() { 54 | Ω(err3).To(BeNil()) 55 | }) 56 | It("Err4 should be nil", func() { 57 | Ω(err4).To(BeNil()) 58 | }) 59 | }) 60 | }) 61 | -------------------------------------------------------------------------------- /pkg/yamlexporter/exportutil.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2019 Dell Inc., or its subsidiaries. All Rights Reserved. 
3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package yamlexporter 12 | 13 | import ( 14 | "bufio" 15 | "os" 16 | "path/filepath" 17 | 18 | "github.com/ghodss/yaml" 19 | "github.com/pkg/errors" 20 | "github.com/pravega/zookeeper-operator/api/v1beta1" 21 | ) 22 | 23 | // YAMLOutputDir where the zookeeper YAML resources will get generated 24 | var YAMLOutputDir string 25 | 26 | // CreateOutputSubDir creates a subdirectories where we want create the YAML file 27 | func CreateOutputSubDir(clusterName string, compName string) (string, error) { 28 | fpath := filepath.Join(clusterName, compName) 29 | return fpath, createDirIfNotExist(fpath) 30 | } 31 | 32 | // GenerateOutputYAMLFile writes YAML output for a resource 33 | func GenerateOutputYAMLFile(subdir string, depType string, data interface{}) error { 34 | filename := filepath.Join(subdir, depType+"."+"yaml") 35 | fileFd, err := os.Create(filename) 36 | if err != nil { 37 | return err 38 | } 39 | defer func() { 40 | _ = fileFd.Close() 41 | }() 42 | yamlWriter := bufio.NewWriter(fileFd) 43 | defer yamlWriter.Flush() 44 | yamlData, err := yaml.Marshal(data) 45 | if err != nil { 46 | return err 47 | } 48 | n, err := yamlWriter.Write(yamlData) 49 | if err != nil { 50 | return errors.Wrapf(err, "write failed total bytes written:%d", n) 51 | } 52 | return nil 53 | } 54 | 55 | func createDirIfNotExist(dir string) error { 56 | if _, err := os.Stat(dir); os.IsNotExist(err) { 57 | err = os.MkdirAll(dir, 0755) 58 | if err != nil { 59 | return err 60 | } 61 | } 62 | return nil 63 | } 64 | 65 | // CreateYAMLOutputDir create output directory for YAML output 66 | func CreateYAMLOutputDir(maindir string) error { 67 | err := createDirIfNotExist(maindir) 68 | if err != nil { 69 | return err 70 | } 71 | return nil 72 | } 
73 | 74 | // ReadInputClusterYAMLFile will read input YAML file and returns Go struct for ZookeeperCluster 75 | func ReadInputClusterYAMLFile(inyamlfile string) (*v1beta1.ZookeeperCluster, error) { 76 | if _, err := os.Stat(inyamlfile); os.IsNotExist(err) { 77 | return nil, err 78 | } 79 | var z v1beta1.ZookeeperCluster 80 | source, err := os.ReadFile(inyamlfile) 81 | if err != nil { 82 | return nil, err 83 | } 84 | err = yaml.Unmarshal(source, &z) 85 | if err != nil { 86 | return nil, err 87 | } 88 | return &z, err 89 | } 90 | -------------------------------------------------------------------------------- /pkg/zk/synchronizers.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package zk 12 | 13 | import ( 14 | appsv1 "k8s.io/api/apps/v1" 15 | v1 "k8s.io/api/core/v1" 16 | ) 17 | 18 | // SyncStatefulSet synchronizes any updates to the stateful-set 19 | func SyncStatefulSet(curr *appsv1.StatefulSet, next *appsv1.StatefulSet) { 20 | curr.Spec.Replicas = next.Spec.Replicas 21 | curr.Spec.Template = next.Spec.Template 22 | curr.Spec.UpdateStrategy = next.Spec.UpdateStrategy 23 | } 24 | 25 | // SyncService synchronizes a service with an updated spec and validates it 26 | func SyncService(curr *v1.Service, next *v1.Service) { 27 | curr.Spec.Ports = next.Spec.Ports 28 | curr.Spec.Type = next.Spec.Type 29 | curr.SetAnnotations(next.GetAnnotations()) 30 | } 31 | 32 | // SyncConfigMap synchronizes a configmap with an updated spec and validates it 33 | func SyncConfigMap(curr *v1.ConfigMap, next *v1.ConfigMap) { 34 | curr.Data = next.Data 35 | curr.BinaryData = next.BinaryData 36 | } 37 | 
-------------------------------------------------------------------------------- /pkg/zk/synchronizers_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package zk_test 12 | 13 | import ( 14 | "github.com/pravega/zookeeper-operator/api/v1beta1" 15 | "github.com/pravega/zookeeper-operator/pkg/zk" 16 | appsv1 "k8s.io/api/apps/v1" 17 | v1 "k8s.io/api/core/v1" 18 | "k8s.io/apimachinery/pkg/api/resource" 19 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 20 | 21 | . "github.com/onsi/ginkgo" 22 | . "github.com/onsi/gomega" 23 | ) 24 | 25 | var _ = Describe("Synchronizers", func() { 26 | 27 | Context("with a valid update specs", func() { 28 | var sts1 *appsv1.StatefulSet 29 | 30 | BeforeEach(func() { 31 | z := &v1beta1.ZookeeperCluster{ 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: "example", 34 | Namespace: "default", 35 | }, 36 | } 37 | z.WithDefaults() 38 | z.Spec.Pod.Resources.Limits = v1.ResourceList{ 39 | v1.ResourceStorage: resource.MustParse("20Gi"), 40 | } 41 | sts1 = zk.MakeStatefulSet(z) 42 | sts2 := zk.MakeStatefulSet(z) 43 | reps := int32(4) 44 | sts2.Spec.Replicas = &reps 45 | sts2.Spec.Template.Spec.Containers[0].Image = "repo/newimage:latest" 46 | zk.SyncStatefulSet(sts1, sts2) 47 | }) 48 | 49 | It("should have the updated fields", func() { 50 | Ω(*sts1.Spec.Replicas).To(BeEquivalentTo(4)) 51 | Ω(sts1.Spec.Template.Spec.Containers[0].Image). 
52 | To(Equal("repo/newimage:latest")) 53 | }) 54 | }) 55 | 56 | Context("with a valid update of Service port", func() { 57 | var port int32 58 | var value string 59 | 60 | BeforeEach(func() { 61 | z := &v1beta1.ZookeeperCluster{ 62 | ObjectMeta: metav1.ObjectMeta{ 63 | Name: "example", 64 | Namespace: "default", 65 | }, 66 | } 67 | z.WithDefaults() 68 | svc1 := zk.MakeClientService(z) 69 | svc2 := svc1.DeepCopy() 70 | svc2.Spec.Ports[0].Port = int32(4000) 71 | svc2.Spec.Type = "temp" 72 | zk.SyncService(svc1, svc2) 73 | port = svc1.Spec.Ports[0].Port 74 | value = string(svc1.Spec.Type) 75 | }) 76 | 77 | It("should have the updated fields for service", func() { 78 | Ω(port).To(BeEquivalentTo(4000)) 79 | Ω(value).To(BeEquivalentTo("temp")) 80 | }) 81 | }) 82 | 83 | Context("with a valid update of config map", func() { 84 | var value string 85 | BeforeEach(func() { 86 | z := &v1beta1.ZookeeperCluster{ 87 | ObjectMeta: metav1.ObjectMeta{ 88 | Name: "example", 89 | Namespace: "default", 90 | }, 91 | } 92 | z.WithDefaults() 93 | cm1 := zk.MakeConfigMap(z) 94 | cm2 := cm1.DeepCopy() 95 | cm2.Data["k1"] = "v1" 96 | zk.SyncConfigMap(cm1, cm2) 97 | value = cm1.Data["k1"] 98 | }) 99 | It("should have value as v1 for cm1.Data's key k1", func() { 100 | Ω(value).To(BeEquivalentTo("v1")) 101 | }) 102 | }) 103 | }) 104 | -------------------------------------------------------------------------------- /pkg/zk/zk_suite_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2021 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package zk 12 | 13 | import ( 14 | "testing" 15 | 16 | . "github.com/onsi/ginkgo" 17 | . 
"github.com/onsi/gomega" 18 | ) 19 | 20 | func TestZookeeper(t *testing.T) { 21 | RegisterFailHandler(Fail) 22 | RunSpecs(t, "ZookeeperCluster Tests") 23 | } 24 | -------------------------------------------------------------------------------- /pkg/zk/zookeeper_client.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2020 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | package zk 11 | 12 | import ( 13 | "fmt" 14 | "strconv" 15 | "strings" 16 | "time" 17 | 18 | "github.com/pravega/zookeeper-operator/api/v1beta1" 19 | "github.com/samuel/go-zookeeper/zk" 20 | ) 21 | 22 | type ZookeeperClient interface { 23 | Connect(string) error 24 | CreateNode(*v1beta1.ZookeeperCluster, string) error 25 | NodeExists(string) (int32, error) 26 | UpdateNode(string, string, int32) error 27 | Close() 28 | } 29 | 30 | type DefaultZookeeperClient struct { 31 | conn *zk.Conn 32 | } 33 | 34 | func (client *DefaultZookeeperClient) Connect(zkUri string) (err error) { 35 | host := []string{zkUri} 36 | conn, _, err := zk.Connect(host, time.Second*5) 37 | if err != nil { 38 | return fmt.Errorf("Failed to connect to zookeeper: %s, Reason: %v", zkUri, err) 39 | } 40 | client.conn = conn 41 | return nil 42 | } 43 | 44 | func (client *DefaultZookeeperClient) CreateNode(zoo *v1beta1.ZookeeperCluster, zNodePath string) (err error) { 45 | paths := strings.Split(zNodePath, "/") 46 | pathLength := len(paths) 47 | var parentPath string 48 | for i := 1; i < pathLength-1; i++ { 49 | parentPath += "/" + paths[i] 50 | if _, err := client.conn.Create(parentPath, nil, 0, zk.WorldACL(zk.PermAll)); err != nil { 51 | return fmt.Errorf("Error creating parent zkNode: %s: %v", parentPath, err) 52 
| } 53 | } 54 | data := "CLUSTER_SIZE=" + strconv.Itoa(int(zoo.Spec.Replicas)) 55 | childNode := parentPath + "/" + paths[pathLength-1] 56 | if _, err := client.conn.Create(childNode, []byte(data), 0, zk.WorldACL(zk.PermAll)); err != nil { 57 | return fmt.Errorf("Error creating sub zkNode: %s: %v", childNode, err) 58 | } 59 | return nil 60 | } 61 | 62 | func (client *DefaultZookeeperClient) UpdateNode(path string, data string, version int32) (err error) { 63 | if _, err := client.conn.Set(path, []byte(data), version); err != nil { 64 | return fmt.Errorf("Error updating zkNode: %v", err) 65 | } 66 | return nil 67 | } 68 | 69 | func (client *DefaultZookeeperClient) NodeExists(zNodePath string) (version int32, err error) { 70 | exists, zNodeStat, err := client.conn.Exists(zNodePath) 71 | if err != nil || !exists { 72 | return -1, fmt.Errorf("Znode exists check failed for path %s: %v", zNodePath, err) 73 | } 74 | return zNodeStat.Version, err 75 | } 76 | 77 | func (client *DefaultZookeeperClient) Close() { 78 | client.conn.Close() 79 | } 80 | -------------------------------------------------------------------------------- /pkg/zk/zookeeper_client_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package zk_test 12 | 13 | import ( 14 | . "github.com/onsi/ginkgo" 15 | . 
"github.com/onsi/gomega" 16 | "github.com/pravega/zookeeper-operator/api/v1beta1" 17 | "github.com/pravega/zookeeper-operator/pkg/zk" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | ) 20 | 21 | var _ = Describe("Zookeeper Client", func() { 22 | 23 | Context("with a valid update of Service port", func() { 24 | var err1, err2, err3, err4, err5 error 25 | BeforeEach(func() { 26 | z := &v1beta1.ZookeeperCluster{ 27 | ObjectMeta: metav1.ObjectMeta{ 28 | Name: "example", 29 | Namespace: "default", 30 | }, 31 | } 32 | zkclient := new(zk.DefaultZookeeperClient) 33 | z.WithDefaults() 34 | err1 = zkclient.Connect("127.0.0.0:2181") 35 | err2 = zkclient.CreateNode(z, "temp/tmp/tmp") 36 | err5 = zkclient.CreateNode(z, "temp/tmp") 37 | err3 = zkclient.UpdateNode("temp/tem/temp", "dasd", 2) 38 | _, err4 = zkclient.NodeExists("temp") 39 | zkclient.Close() 40 | }) 41 | It("err1 should be nil", func() { 42 | Ω(err1).Should(BeNil()) 43 | }) 44 | It("err2 should be not nil", func() { 45 | Ω(err2).ShouldNot(BeNil()) 46 | }) 47 | It("err3 should be not nil", func() { 48 | Ω(err3).ShouldNot(BeNil()) 49 | }) 50 | It("err4 should be not nil", func() { 51 | Ω(err4).ShouldNot(BeNil()) 52 | }) 53 | It("err5 should be not nil", func() { 54 | Ω(err5).ShouldNot(BeNil()) 55 | }) 56 | }) 57 | }) 58 | -------------------------------------------------------------------------------- /scripts/check_format.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # exit immediately when a command fails 3 | set -e 4 | # only exit with zero if all commands of the pipeline exit successfully 5 | set -o pipefail 6 | # error on unset variables 7 | set -u 8 | 9 | goFiles=$(find . 
-name \*.go -not -path "./vendor/*" -print) 10 | invalidFiles=$(gofmt -l $goFiles) 11 | 12 | if [ "$invalidFiles" ]; then 13 | echo -e "These files did not pass the 'go fmt' check, please run 'go fmt' on them:" 14 | echo -e $invalidFiles 15 | exit 1 16 | fi 17 | -------------------------------------------------------------------------------- /scripts/check_license.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # exit immediately when a command fails 3 | set -e 4 | # only exit with zero if all commands of the pipeline exit successfully 5 | set -o pipefail 6 | # error on unset variables 7 | set -u 8 | 9 | licRes=$( 10 | find . -type f -iname '*.go' ! -path '*/vendor/*' -exec \ 11 | sh -c 'head -n3 $1 | grep -Eq "(Copyright|generated|GENERATED)" || echo -e $1' {} {} \; 12 | ) 13 | 14 | if [ -n "${licRes}" ]; then 15 | echo -e "license header checking failed:\\n${licRes}" 16 | exit 255 17 | fi 18 | -------------------------------------------------------------------------------- /test/e2e/basic_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | . "github.com/onsi/ginkgo" 15 | . 
"github.com/onsi/gomega" 16 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" 17 | ) 18 | 19 | // Test create and recreate a Zookeeper cluster with the same name 20 | var _ = Describe("Basic test controller", func() { 21 | Context("Check create/delete operations", func() { 22 | It("should create and recreate a Zookeeper cluster with the same name", func() { 23 | By("create Zookeeper cluster") 24 | defaultCluster := zk_e2eutil.NewDefaultCluster(testNamespace) 25 | defaultCluster.WithDefaults() 26 | defaultCluster.Status.Init() 27 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 28 | 29 | zk, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 30 | Expect(err).NotTo(HaveOccurred()) 31 | 32 | podSize := 3 33 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, defaultCluster, podSize)).NotTo(HaveOccurred()) 34 | Expect(zk_e2eutil.CheckAdminService(logger, k8sClient, zk)).NotTo(HaveOccurred()) 35 | 36 | By("delete created Zookeeper cluster") 37 | Expect(k8sClient.Delete(ctx, zk)).Should(Succeed()) 38 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred()) 39 | 40 | By("create Zookeeper cluster with the same name") 41 | defaultCluster = zk_e2eutil.NewDefaultCluster(testNamespace) 42 | defaultCluster.WithDefaults() 43 | defaultCluster.Status.Init() 44 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 45 | 46 | zk, err = zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 47 | Expect(err).NotTo(HaveOccurred()) 48 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, defaultCluster, podSize)).NotTo(HaveOccurred()) 49 | 50 | By("delete created Zookeeper cluster") 51 | Expect(k8sClient.Delete(ctx, zk)).Should(Succeed()) 52 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred()) 53 | }) 54 | }) 55 | }) 56 | -------------------------------------------------------------------------------- 
/test/e2e/ephemeral_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | . "github.com/onsi/ginkgo" 15 | . "github.com/onsi/gomega" 16 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" 17 | ) 18 | 19 | var _ = Describe("Ephemeral test controller", func() { 20 | Context("Check create/scale operations", func() { 21 | It("should create and scale up and down a Zookeeper cluster", func() { 22 | defaultCluster := zk_e2eutil.NewClusterWithEmptyDir(testNamespace) 23 | defaultCluster.WithDefaults() 24 | defaultCluster.Status.Init() 25 | By("create zk cluster") 26 | zk, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 27 | Expect(err).NotTo(HaveOccurred()) 28 | // 29 | // A default Zookeeper cluster should have 3 replicas 30 | podSize := 3 31 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, defaultCluster, podSize)).NotTo(HaveOccurred()) 32 | 33 | // This is to get the latest zk cluster object 34 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk) 35 | Expect(err).NotTo(HaveOccurred()) 36 | 37 | // Scale up zk cluster, increase replicas to 5 38 | By("Scale up zk cluster, increase replicas to 5") 39 | zk.Spec.Replicas = 5 40 | podSize = 5 41 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 42 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 43 | 44 | // This is to get the latest zk cluster object 45 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk) 46 | Expect(err).NotTo(HaveOccurred()) 47 | 48 | // Scale 
down zk cluster back to default 49 | By("Scale down zk cluster back to default") 50 | zk.Spec.Replicas = 3 51 | podSize = 3 52 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 53 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 54 | 55 | // Delete cluster 56 | By("delete zk cluster") 57 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 58 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred()) 59 | }) 60 | }) 61 | }) 62 | -------------------------------------------------------------------------------- /test/e2e/image_pullsecret_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | . "github.com/onsi/ginkgo" 15 | . 
"github.com/onsi/gomega" 16 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" 17 | v1 "k8s.io/api/core/v1" 18 | ) 19 | 20 | // Test create and recreate a Zookeeper cluster with the same name 21 | var _ = Describe("Image pull secret check", func() { 22 | Context("Check create cluster with specified ImagePullSecrets ", func() { 23 | It("should create cluster with specified ImagePullSecrets specs", func() { 24 | 25 | defaultCluster := zk_e2eutil.NewDefaultCluster(testNamespace) 26 | defaultCluster.WithDefaults() 27 | defaultCluster.Status.Init() 28 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 29 | defaultCluster.Spec.Image.Repository = "testanisha/zookeeper" 30 | defaultCluster.Spec.Image.Tag = "checksecret_1" 31 | defaultCluster.Spec.Pod.ImagePullSecrets = []v1.LocalObjectReference{ 32 | { 33 | Name: "regcred", 34 | }, 35 | } 36 | By("create zk cluster with non-default spec") 37 | zk, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 38 | Expect(err).NotTo(HaveOccurred()) 39 | 40 | // A default Zookeeper cluster should have 3 replicas 41 | podSize := 3 42 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 43 | Expect(zk_e2eutil.CheckAdminService(logger, k8sClient, zk)).NotTo(HaveOccurred()) 44 | 45 | By("delete zk cluster") 46 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 47 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred()) 48 | }) 49 | }) 50 | }) 51 | -------------------------------------------------------------------------------- /test/e2e/multiple_zk_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 
6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | . "github.com/onsi/ginkgo" 15 | . "github.com/onsi/gomega" 16 | api "github.com/pravega/zookeeper-operator/api/v1beta1" 17 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" 18 | "time" 19 | ) 20 | 21 | // Test create and recreate a Zookeeper cluster with the same name 22 | var _ = Describe("Operations with multiple cluster", func() { 23 | Context("Perform create, update, delete and recreate operations on 3 clusters", func() { 24 | It("should create cluster several clusters", func() { 25 | 26 | defaultCluster := zk_e2eutil.NewDefaultCluster(testNamespace) 27 | 28 | defaultCluster.WithDefaults() 29 | defaultCluster.Status.Init() 30 | defaultCluster.ObjectMeta.Name = "zk1" 31 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 32 | 33 | zk1, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 34 | Expect(err).NotTo(HaveOccurred()) 35 | 36 | // A default zookeeper cluster should have 3 replicas 37 | podSize := 3 38 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk1, podSize)).NotTo(HaveOccurred()) 39 | 40 | defaultCluster = zk_e2eutil.NewDefaultCluster(testNamespace) 41 | 42 | defaultCluster.WithDefaults() 43 | defaultCluster.Status.Init() 44 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 45 | defaultCluster.ObjectMeta.Name = "zk2" 46 | initialVersion := "0.2.7" 47 | upgradeVersion := "0.2.9" 48 | defaultCluster.Spec.Image = api.ContainerImage{ 49 | Repository: "pravega/zookeeper", 50 | Tag: initialVersion, 51 | } 52 | zk2, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 53 | Expect(err).NotTo(HaveOccurred()) 54 | 55 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk2, podSize)).NotTo(HaveOccurred()) 56 | 57 | // This is to get the latest Zookeeper cluster object 58 | zk2, err = 
zk_e2eutil.GetCluster(logger, k8sClient, zk2) 59 | Expect(err).NotTo(HaveOccurred()) 60 | Expect(zk2.Status.CurrentVersion).To(Equal(initialVersion)) 61 | 62 | defaultCluster = zk_e2eutil.NewDefaultCluster(testNamespace) 63 | 64 | defaultCluster.WithDefaults() 65 | defaultCluster.Status.Init() 66 | defaultCluster.ObjectMeta.Name = "zk3" 67 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 68 | 69 | zk3, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 70 | Expect(err).NotTo(HaveOccurred()) 71 | 72 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk3, podSize)).NotTo(HaveOccurred()) 73 | 74 | // This is to get the latest zk cluster object 75 | zk1, err = zk_e2eutil.GetCluster(logger, k8sClient, zk1) 76 | 77 | // scale up the replicas in first cluster 78 | zk1.Spec.Replicas = 5 79 | podSize = 5 80 | 81 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk1)).NotTo(HaveOccurred()) 82 | 83 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk1, podSize)).NotTo(HaveOccurred()) 84 | 85 | zk1, err = zk_e2eutil.GetCluster(logger, k8sClient, zk1) 86 | Expect(err).NotTo(HaveOccurred()) 87 | 88 | //scale down the replicas back to 3 89 | zk1.Spec.Replicas = 3 90 | podSize = 3 91 | 92 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk1)).NotTo(HaveOccurred()) 93 | 94 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk1, podSize)).NotTo(HaveOccurred()) 95 | 96 | // This is to get the latest Zookeeper cluster object 97 | zk2, err = zk_e2eutil.GetCluster(logger, k8sClient, zk2) 98 | Expect(err).NotTo(HaveOccurred()) 99 | 100 | //upgrade the image in second Cluster 101 | zk2.Spec.Image.Tag = upgradeVersion 102 | 103 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk2)).NotTo(HaveOccurred()) 104 | 105 | Expect(zk_e2eutil.WaitForClusterToUpgrade(logger, k8sClient, zk2, upgradeVersion)).NotTo(HaveOccurred()) 106 | 107 | // This is to get the latest Zookeeper cluster object 108 | zk2, err = 
zk_e2eutil.GetCluster(logger, k8sClient, zk2) 109 | Expect(err).NotTo(HaveOccurred()) 110 | 111 | Expect(zk2.Spec.Image.Tag).To(Equal(upgradeVersion)) 112 | Expect(zk2.Status.CurrentVersion).To(Equal(upgradeVersion)) 113 | Expect(zk2.Status.TargetVersion).To(Equal("")) 114 | 115 | // This is to get the latest zk cluster object 116 | zk3, err = zk_e2eutil.GetCluster(logger, k8sClient, zk3) 117 | 118 | //Delete all pods in the 3rd Cluster 119 | podDeleteCount := 3 120 | Expect(zk_e2eutil.DeletePods(logger, k8sClient, zk3, podDeleteCount)).NotTo(HaveOccurred()) 121 | 122 | time.Sleep(60 * time.Second) 123 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk3, podSize)).NotTo(HaveOccurred()) 124 | 125 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk1)).NotTo(HaveOccurred()) 126 | 127 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk1)).NotTo(HaveOccurred()) 128 | 129 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk2)).NotTo(HaveOccurred()) 130 | 131 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk2)).NotTo(HaveOccurred()) 132 | 133 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk3)).NotTo(HaveOccurred()) 134 | 135 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk3)).NotTo(HaveOccurred()) 136 | 137 | //Recreating cluster with same name 138 | defaultCluster = zk_e2eutil.NewDefaultCluster(testNamespace) 139 | defaultCluster.WithDefaults() 140 | defaultCluster.Status.Init() 141 | defaultCluster.ObjectMeta.Name = "zk1" 142 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 143 | 144 | zk1, err = zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 145 | Expect(err).NotTo(HaveOccurred()) 146 | 147 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk1, podSize)).NotTo(HaveOccurred()) 148 | 149 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk1)).NotTo(HaveOccurred()) 150 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, 
zk1)).NotTo(HaveOccurred()) 151 | }) 152 | }) 153 | }) 154 | -------------------------------------------------------------------------------- /test/e2e/pod_deletion_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | "time" 15 | 16 | . "github.com/onsi/ginkgo" 17 | . "github.com/onsi/gomega" 18 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" 19 | ) 20 | 21 | // Test create and recreate a Zookeeper cluster with the same name 22 | var _ = Describe("Delete pods in zk clusters", func() { 23 | Context("Delete pods, check that pods get recreated", func() { 24 | It("should keep number of replicas consistent", func() { 25 | defaultCluster := zk_e2eutil.NewDefaultCluster(testNamespace) 26 | 27 | defaultCluster.WithDefaults() 28 | defaultCluster.Status.Init() 29 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 30 | By("create zk cluster") 31 | zk, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 32 | Expect(err).NotTo(HaveOccurred()) 33 | 34 | // A default zookeeper cluster should have 3 pods 35 | podSize := 3 36 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)) 37 | 38 | By("Delete one of the pods") 39 | podDeleteCount := 1 40 | Expect(zk_e2eutil.DeletePods(logger, k8sClient, zk, podDeleteCount)) 41 | 42 | time.Sleep(60 * time.Second) 43 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)) 44 | 45 | By("Delete two of the pods") 46 | podDeleteCount = 2 47 | Expect(zk_e2eutil.DeletePods(logger, k8sClient, zk, podDeleteCount)) 48 | 
time.Sleep(60 * time.Second) 49 | 50 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)) 51 | 52 | By("Delete all of the pods") 53 | podDeleteCount = 3 54 | Expect(zk_e2eutil.DeletePods(logger, k8sClient, zk, podDeleteCount)) 55 | time.Sleep(60 * time.Second) 56 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)) 57 | 58 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 59 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred()) 60 | }) 61 | }) 62 | }) 63 | -------------------------------------------------------------------------------- /test/e2e/rolling_restart_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (&the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | . "github.com/onsi/ginkgo" 15 | . 
"github.com/onsi/gomega" 16 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" 17 | "time" 18 | ) 19 | 20 | var _ = Describe("Perform rolling restart on zk cluster", func() { 21 | Context("Check rolling restart operation", func() { 22 | It("should perform rolling restart", func() { 23 | cluster := zk_e2eutil.NewDefaultCluster(testNamespace) 24 | 25 | cluster.WithDefaults() 26 | cluster.Status.Init() 27 | cluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 28 | 29 | zk, err := zk_e2eutil.CreateCluster(logger, k8sClient, cluster) 30 | Expect(err).NotTo(HaveOccurred()) 31 | 32 | // A default Zookeepercluster should have 3 replicas 33 | podSize := 3 34 | start := time.Now().Minute()*60 + time.Now().Second() 35 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 36 | clusterCreateDuration := time.Now().Minute()*60 + time.Now().Second() - start 37 | 38 | // This is to get the latest Zookeeper cluster object 39 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk) 40 | Expect(err).NotTo(HaveOccurred()) 41 | podList, err := zk_e2eutil.GetPods(k8sClient, zk) 42 | Expect(err).NotTo(HaveOccurred()) 43 | for i := 0; i < len(podList.Items); i++ { 44 | Expect(podList.Items[i].Annotations).NotTo(HaveKey("restartTime")) 45 | } 46 | Expect(zk.GetTriggerRollingRestart()).To(Equal(false)) 47 | 48 | // Trigger a rolling restart 49 | zk.Spec.TriggerRollingRestart = true 50 | err = zk_e2eutil.UpdateCluster(logger, k8sClient, zk) 51 | // zk_e2eutil.WaitForClusterToBecomeReady(...) will return as soon as any pod is restarted as the cluster is briefly reported to be healthy even though the restart is not completed. this method is hence called after a sleep to ensure that the restart has completed before asserting the test cases. 
52 | time.Sleep(time.Duration(clusterCreateDuration) * 2 * time.Second) 53 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 54 | 55 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk) 56 | Expect(err).NotTo(HaveOccurred()) 57 | newPodList, err := zk_e2eutil.GetPods(k8sClient, zk) 58 | Expect(err).NotTo(HaveOccurred()) 59 | var firstRestartTime []string 60 | for i := 0; i < len(newPodList.Items); i++ { 61 | Expect(newPodList.Items[i].Annotations).To(HaveKey("restartTime")) 62 | firstRestartTime = append(firstRestartTime, newPodList.Items[i].Annotations["restartTime"]) 63 | } 64 | Expect(zk.GetTriggerRollingRestart()).To(Equal(false)) 65 | 66 | // Trigger a rolling restart again 67 | zk.Spec.TriggerRollingRestart = true 68 | err = zk_e2eutil.UpdateCluster(logger, k8sClient, zk) 69 | // zk_e2eutil.WaitForClusterToBecomeReady(...) will return as soon as any pod is restarted as the cluster is briefly reported to be healthy even though the complete restart is not completed. this method is hence called after a sleep to ensure that the restart has completed before asserting the test cases. 
70 | time.Sleep(time.Duration(clusterCreateDuration) * 2 * time.Second) 71 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 72 | 73 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk) 74 | Expect(err).NotTo(HaveOccurred()) 75 | newPodList2, err := zk_e2eutil.GetPods(k8sClient, zk) 76 | Expect(err).NotTo(HaveOccurred()) 77 | for i := 0; i < len(newPodList2.Items); i++ { 78 | Expect(newPodList2.Items[i].Annotations).To(HaveKey("restartTime")) 79 | Expect(newPodList2.Items[i].Annotations["restartTime"]).NotTo(Equal(firstRestartTime[i])) 80 | } 81 | Expect(zk.GetTriggerRollingRestart()).To(Equal(false)) 82 | 83 | // Delete cluster 84 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 85 | 86 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred()) 87 | }) 88 | }) 89 | }) 90 | -------------------------------------------------------------------------------- /test/e2e/scale_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | . "github.com/onsi/ginkgo" 15 | . 
"github.com/onsi/gomega" 16 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil" 17 | ) 18 | 19 | var _ = Describe("Perform scale for cluster upgrade", func() { 20 | Context("Check zk cluster scale operation", func() { 21 | It("should scale replicas number up and down", func() { 22 | defaultCluster := zk_e2eutil.NewDefaultCluster(testNamespace) 23 | 24 | defaultCluster.WithDefaults() 25 | 26 | defaultCluster.Status.Init() 27 | defaultCluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" 28 | 29 | zk, err := zk_e2eutil.CreateCluster(logger, k8sClient, defaultCluster) 30 | 31 | Expect(err).NotTo(HaveOccurred()) 32 | 33 | // A default zk cluster should have 3 pods 34 | podSize := 3 35 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 36 | 37 | // This is to get the latest zk cluster object 38 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk) 39 | Expect(err).NotTo(HaveOccurred()) 40 | 41 | // Scale up zk cluster, increase replicas to 5 42 | 43 | zk.Spec.Replicas = 5 44 | podSize = 5 45 | 46 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 47 | 48 | podDeleteCount := 2 49 | Expect(zk_e2eutil.DeletePods(logger, k8sClient, zk, podDeleteCount)).NotTo(HaveOccurred()) 50 | 51 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 52 | 53 | // This is to get the latest zk cluster object 54 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk) 55 | Expect(err).NotTo(HaveOccurred()) 56 | 57 | // Scale down zk cluster back to default 58 | zk.Spec.Replicas = 3 59 | podSize = 3 60 | 61 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 62 | 63 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred()) 64 | 65 | // Delete cluster 66 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk)).NotTo(HaveOccurred()) 67 | 68 | 
Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred()) 69 | }) 70 | }) 71 | }) 72 | -------------------------------------------------------------------------------- /test/e2e/suite_test.go: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (&the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | */ 10 | 11 | package e2e 12 | 13 | import ( 14 | "context" 15 | "github.com/go-logr/logr" 16 | api "github.com/pravega/zookeeper-operator/api/v1beta1" 17 | zookeeperv1beta1 "github.com/pravega/zookeeper-operator/api/v1beta1" 18 | zookeepercontroller "github.com/pravega/zookeeper-operator/controllers" 19 | zkClient "github.com/pravega/zookeeper-operator/pkg/zk" 20 | "k8s.io/client-go/kubernetes/scheme" 21 | "k8s.io/client-go/rest" 22 | "os" 23 | ctrl "sigs.k8s.io/controller-runtime" 24 | "sigs.k8s.io/controller-runtime/pkg/cache" 25 | "sigs.k8s.io/controller-runtime/pkg/client" 26 | "sigs.k8s.io/controller-runtime/pkg/envtest" 27 | logf "sigs.k8s.io/controller-runtime/pkg/log" 28 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 29 | "testing" 30 | 31 | . "github.com/onsi/ginkgo" 32 | . "github.com/onsi/gomega" 33 | ) 34 | 35 | var ( 36 | cfg *rest.Config 37 | k8sClient client.Client // You'll be using this client in your tests. 
38 | testEnv *envtest.Environment
39 | ctx context.Context
40 | cancel context.CancelFunc
41 | testNamespace = "default" // all specs create/list/delete clusters in this namespace
42 | logger logr.Logger
43 | )
44 | 
45 | func TestAPIs(t *testing.T) { // go-test entry point that runs the whole Ginkgo e2e suite
46 | RegisterFailHandler(Fail)
47 | RunSpecs(t, "Controller e2e Suite")
48 | }
49 | 
50 | var _ = BeforeSuite(func() {
51 | logger = zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))
52 | logf.SetLogger(logger)
53 | 
54 | ctx, cancel = context.WithCancel(context.TODO())
55 | 
56 | enabled := true
57 | By("bootstrapping test environment")
58 | testEnv = &envtest.Environment{
59 | Config: cfg,
60 | UseExistingCluster: &enabled, // target a real, pre-provisioned cluster rather than spinning up a standalone envtest control plane
61 | }
62 | 
63 | /*
64 | Then, we start the envtest cluster.
65 | */
66 | cfg, err := testEnv.Start() // NOTE(review): ':=' declares a NEW local cfg that shadows the package-level cfg (which therefore stays nil); every use below reads the local one — confirm this is intended
67 | Expect(err).NotTo(HaveOccurred())
68 | Expect(cfg).NotTo(BeNil())
69 | 
70 | err = zookeeperv1beta1.AddToScheme(scheme.Scheme) // register the ZookeeperCluster CRD types with the client scheme
71 | Expect(err).NotTo(HaveOccurred())
72 | 
73 | /*
74 | After the schemas, you will see the following marker.
75 | This marker is what allows new schemas to be added here automatically when a new API is added to the project.
76 | */
77 | 
78 | //+kubebuilder:scaffold:scheme
79 | 
80 | /*
81 | A client is created for our test CRUD operations.
82 | */
83 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
84 | Expect(err).NotTo(HaveOccurred())
85 | Expect(k8sClient).NotTo(BeNil())
86 | 
87 | if os.Getenv("RUN_LOCAL") == "true" { // RUN_LOCAL=true starts the operator in-process; otherwise the suite presumably relies on an operator already deployed in the cluster — TODO confirm
88 | k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
89 | Scheme: scheme.Scheme,
90 | Cache: cache.Options{Namespaces: []string{testNamespace}}, // restrict the manager's cache to the test namespace
91 | })
92 | Expect(err).ToNot(HaveOccurred())
93 | 
94 | err = (&zookeepercontroller.ZookeeperClusterReconciler{
95 | Client: k8sManager.GetClient(),
96 | Scheme: k8sManager.GetScheme(),
97 | ZkClient: new(zkClient.DefaultZookeeperClient),
98 | }).SetupWithManager(k8sManager)
99 | Expect(err).ToNot(HaveOccurred())
100 | 
101 | go func() { // run the manager for the lifetime of the suite; it stops on the signal handler's shutdown
102 | defer GinkgoRecover()
103 | err = k8sManager.Start(ctrl.SetupSignalHandler())
104 | Expect(err).ToNot(HaveOccurred(), "failed to run manager")
105 | }()
106 | }
107 | 
108 | }, 60) // 60-second BeforeSuite timeout (ginkgo v1 style)
109 | 
110 | /*
111 | Kubebuilder also generates boilerplate functions for cleaning up envtest and actually running your test files in your controllers/ directory.
112 | You won't need to touch these.
113 | */
114 | 
115 | var _ = AfterSuite(func() {
116 | cancel()
117 | By("tearing down the test environment")
118 | err := testEnv.Stop()
119 | Expect(err).NotTo(HaveOccurred())
120 | })
121 | 
122 | var _ = AfterEach(func() { // delete any ZookeeperClusters a spec left behind in the test namespace
123 | zkList := &api.ZookeeperClusterList{}
124 | listOptions := []client.ListOption{
125 | client.InNamespace(testNamespace),
126 | }
127 | Expect(k8sClient.List(ctx, zkList, listOptions...)).NotTo(HaveOccurred())
128 | for _, zk := range zkList.Items {
129 | Expect(k8sClient.Delete(ctx, &zk)).NotTo(HaveOccurred()) // &zk aliases the loop variable; safe here because Delete runs synchronously each iteration
130 | }
131 | })
132 | 
--------------------------------------------------------------------------------
/test/e2e/upgrade_test.go:
--------------------------------------------------------------------------------
1 | /**
2 | * Copyright (c) 2018 Dell Inc., or its subsidiaries. All Rights Reserved.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | */
10 | 
11 | package e2e
12 | 
13 | import (
14 | . "github.com/onsi/ginkgo"
15 | . "github.com/onsi/gomega"
16 | api "github.com/pravega/zookeeper-operator/api/v1beta1"
17 | zk_e2eutil "github.com/pravega/zookeeper-operator/pkg/test/e2e/e2eutil"
18 | )
19 | 
20 | var _ = Describe("Perform zk cluster upgrade", func() { // e2e: upgrade the cluster image tag 0.2.7 -> 0.2.9 and verify the status versions track it
21 | Context("Check zk cluster upgrade operation", func() {
22 | It("should update spec image version", func() {
23 | 
24 | cluster := zk_e2eutil.NewDefaultCluster(testNamespace)
25 | 
26 | cluster.WithDefaults()
27 | cluster.Status.Init()
28 | cluster.Spec.Persistence.VolumeReclaimPolicy = "Delete" // reclaim PVCs when the cluster is deleted, so teardown leaves the namespace clean
29 | initialVersion := "0.2.7"
30 | upgradeVersion := "0.2.9"
31 | cluster.Spec.Image = api.ContainerImage{ // pin the starting image so the upgrade delta is deterministic
32 | Repository: "pravega/zookeeper",
33 | Tag: initialVersion,
34 | }
35 | 
36 | zk, err := zk_e2eutil.CreateCluster(logger, k8sClient, cluster)
37 | Expect(err).NotTo(HaveOccurred())
38 | 
39 | // A default Zookeepercluster should have 3 replicas
40 | podSize := 3
41 | Expect(zk_e2eutil.WaitForClusterToBecomeReady(logger, k8sClient, zk, podSize)).NotTo(HaveOccurred())
42 | 
43 | // This is to get the latest Zookeeper cluster object
44 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk)
45 | Expect(err).NotTo(HaveOccurred())
46 | 
47 | Expect(zk.Status.CurrentVersion).To(Equal(initialVersion))
48 | 
49 | zk.Spec.Image.Tag = upgradeVersion // trigger the upgrade by bumping only the image tag
50 | 
51 | Expect(zk_e2eutil.UpdateCluster(logger, k8sClient, zk)).NotTo(HaveOccurred())
52 | 
53 | Expect(zk_e2eutil.WaitForClusterToUpgrade(logger, k8sClient, zk, upgradeVersion)).NotTo(HaveOccurred()) // block until the operator reports the upgrade complete
54 | 
55 | // This is to get the latest Zookeeper cluster object
56 | zk, err = zk_e2eutil.GetCluster(logger, k8sClient, zk)
57 | Expect(err).NotTo(HaveOccurred())
58 | 
59 | Expect(zk.Spec.Image.Tag).To(Equal(upgradeVersion))
60 | Expect(zk.Status.CurrentVersion).To(Equal(upgradeVersion))
61 | Expect(zk.Status.TargetVersion).To(Equal("")) // TargetVersion is expected to be cleared once the upgrade finishes
62 | 
63 | // Delete cluster
64 | Expect(zk_e2eutil.DeleteCluster(logger, k8sClient, zk)).NotTo(HaveOccurred())
65 | 
66 | Expect(zk_e2eutil.WaitForClusterToTerminate(logger, k8sClient, zk)).NotTo(HaveOccurred())
67 | })
68 | })
69 | })
70 | 
--------------------------------------------------------------------------------