├── .github └── workflows │ └── release-chart.yaml ├── .gitignore ├── .gitlab-ci.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── build ├── Dockerfile └── bin │ ├── entrypoint │ └── user_setup ├── charts └── redis-cluster-operator │ ├── .helmignore │ ├── Chart.yaml │ ├── crds │ ├── redis.kun_distributedredisclusters_crd.yaml │ └── redis.kun_redisclusterbackups_crd.yaml │ ├── templates │ ├── _helpers.tpl │ ├── operator.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml │ └── values.yaml ├── cmd └── manager │ └── main.go ├── deploy ├── cluster │ ├── cluster_role.yaml │ ├── cluster_role_binding.yaml │ └── operator.yaml ├── crds │ ├── redis.kun_distributedredisclusters_crd.yaml │ └── redis.kun_redisclusterbackups_crd.yaml ├── e2e.yml ├── example │ ├── backup-restore │ │ ├── redisclusterbackup_cr.yaml │ │ ├── redisclusterbackup_topvc.yaml │ │ ├── restore.yaml │ │ └── restore_frompvc.yaml │ ├── custom-config.yaml │ ├── custom-password.yaml │ ├── custom-resources.yaml │ ├── custom-service.yaml │ ├── persistent.yaml │ ├── prometheus-exporter.yaml │ ├── redis.kun_v1alpha1_distributedrediscluster_cr.yaml │ └── securitycontext.yaml ├── namespace │ ├── operator.yaml │ ├── role.yaml │ └── role_binding.yaml └── service_account.yaml ├── doc └── design │ └── zh │ ├── cluster_backup_and_restore.md │ ├── cluster_create.md │ └── cluster_scaling.md ├── go.mod ├── go.sum ├── hack ├── docker │ └── redis-tools │ │ ├── Dockerfile │ │ ├── make.sh │ │ └── redis-tools.sh ├── e2e.sh ├── lib │ ├── image.sh │ └── lib.sh └── webhook │ ├── README.md │ ├── create-signed-cert.sh │ ├── operator.yml │ ├── patch-ca-bundle.sh │ ├── service.yaml │ └── validatingwebhook.yaml ├── pkg ├── apis │ ├── addtoscheme_redis_v1alpha1.go │ ├── apis.go │ └── redis │ │ ├── group.go │ │ └── v1alpha1 │ │ ├── constants.go │ │ ├── default.go │ │ ├── distributedrediscluster_types.go │ │ ├── distributedrediscluster_webhook.go │ │ ├── distributedrediscluster_webhook_test.go │ │ ├── doc.go │ 
│ ├── redisclusterbackup_types.go │ │ ├── register.go │ │ ├── zz_generated.deepcopy.go │ │ └── zz_generated.openapi.go ├── config │ └── redis.go ├── controller │ ├── add_distributedrediscluster.go │ ├── add_redisclusterbackup.go │ ├── clustering │ │ ├── migration.go │ │ ├── migration_test.go │ │ ├── placement.go │ │ ├── placement_v2.go │ │ ├── rebalance.go │ │ ├── rebalance_test.go │ │ └── roles.go │ ├── controller.go │ ├── distributedrediscluster │ │ ├── distributedrediscluster_controller.go │ │ ├── errors.go │ │ ├── helper.go │ │ ├── status.go │ │ └── sync_handler.go │ ├── heal │ │ ├── clustersplit.go │ │ ├── clustersplit_test.go │ │ ├── failednodes.go │ │ ├── heal.go │ │ ├── terminatingpod.go │ │ └── untrustenodes.go │ ├── manager │ │ ├── checker.go │ │ ├── ensurer.go │ │ ├── ensurer_test.go │ │ └── healer.go │ └── redisclusterbackup │ │ ├── helper.go │ │ ├── redisclusterbackup_controller.go │ │ └── sync_handler.go ├── event │ └── event.go ├── exec │ └── exec.go ├── k8sutil │ ├── batchjob.go │ ├── configmap.go │ ├── customresource.go │ ├── pod.go │ ├── poddisruptionbudget.go │ ├── pvc.go │ ├── service.go │ ├── statefulset.go │ └── util.go ├── osm │ ├── osm.go │ └── rclone.go ├── redisutil │ ├── admin.go │ ├── client.go │ ├── cluster.go │ ├── clusterinfo.go │ ├── connections.go │ ├── errors.go │ ├── node.go │ ├── node_test.go │ ├── slot.go │ └── slot_test.go ├── resources │ ├── configmaps │ │ ├── configmap.go │ │ └── configmap_test.go │ ├── poddisruptionbudgets │ │ └── poddisruptionbudget.go │ ├── services │ │ └── service.go │ └── statefulsets │ │ ├── helper.go │ │ ├── statefulset.go │ │ └── statefulset_test.go └── utils │ ├── compare.go │ ├── labels.go │ ├── math.go │ ├── parse.go │ ├── parse_test.go │ ├── rename_cmd.go │ ├── scoped.go │ ├── string.go │ └── types.go ├── static └── redis-cluster.png ├── test ├── e2e │ ├── Dockerfile │ ├── README.md │ ├── drc │ │ ├── drc_suite_test.go │ │ └── drc_test.go │ ├── drcb │ │ ├── drcb_suite_test.go │ │ └── drcb_test.go │ 
├── framework.go │ ├── goredis_util.go │ ├── operator_util.go │ ├── rename.conf │ └── util.go └── testclient │ └── client.go ├── tools.go └── version └── version.go /.github/workflows/release-chart.yaml: -------------------------------------------------------------------------------- 1 | name: Release Charts 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | paths: 8 | - 'charts/**' 9 | 10 | jobs: 11 | release: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v1 16 | - name: Configure Git 17 | run: | 18 | git config user.name "$GITHUB_ACTOR" 19 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com" 20 | - name: Run chart-releaser 21 | uses: helm/chart-releaser-action@master 22 | env: 23 | CR_TOKEN: '${{ secrets.CR_TOKEN }}' 24 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary Build Files 2 | build/_output 3 | build/_test 4 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 5 | ### Emacs ### 6 | # -*- mode: gitignore; -*- 7 | *~ 8 | \#*\# 9 | /.emacs.desktop 10 | /.emacs.desktop.lock 11 | *.elc 12 | auto-save-list 13 | tramp 14 | .\#* 15 | # Org-mode 16 | .org-id-locations 17 | *_archive 18 | # flymake-mode 19 | *_flymake.* 20 | # eshell files 21 | /eshell/history 22 | /eshell/lastdir 23 | # elpa packages 24 | /elpa/ 25 | # reftex files 26 | *.rel 27 | # AUCTeX auto folder 28 | /auto/ 29 | # cask packages 30 | .cask/ 31 | dist/ 32 | # Flycheck 33 | flycheck_*.el 34 | # server auth directory 35 | /server/ 36 | # projectiles files 37 | .projectile 38 | projectile-bookmarks.eld 39 | # directory configuration 40 | .dir-locals.el 41 | # saveplace 42 | places 43 | # url cache 44 | url/cache/ 45 | # cedet 46 | ede-projects.el 47 | # smex 48 | smex-items 49 | # company-statistics 50 | company-statistics-cache.el 51 | # anaconda-mode 52 | anaconda-mode/ 53 | 
### Go ### 54 | # Binaries for programs and plugins 55 | *.exe 56 | *.exe~ 57 | *.dll 58 | *.so 59 | *.dylib 60 | # Test binary, build with 'go test -c' 61 | *.test 62 | # Output of the go coverage tool, specifically when used with LiteIDE 63 | *.out 64 | ### Vim ### 65 | # swap 66 | .sw[a-p] 67 | .*.sw[a-p] 68 | # session 69 | Session.vim 70 | # temporary 71 | .netrwhist 72 | # auto-generated tag files 73 | tags 74 | ### VisualStudioCode ### 75 | .vscode/* 76 | .history 77 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 78 | .idea/* 79 | vendor/* 80 | /main 81 | /Dockerfile-withvendor 82 | Dockerfile-TZSH 83 | skaffold.yaml -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | variables: 2 | IMAGE_NAME: $REPO/redis-cluster-operator 3 | 4 | stages: 5 | - BuildImage 6 | 7 | docker-image: 8 | stage: BuildImage 9 | tags: 10 | - kun 11 | image: $BUILD_IMAGE 12 | script: 13 | - IMAGE_TAG=$CI_COMMIT_SHA && if [[ -n "$CI_COMMIT_TAG" ]]; then IMAGE_TAG=$CI_COMMIT_TAG ; fi 14 | - /kaniko/executor -c $CI_PROJECT_DIR -f Dockerfile -d $IMAGE_NAME:$IMAGE_TAG 15 | only: 16 | - master 17 | - dev -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.13.3-alpine as go-builder 2 | 3 | RUN apk update && apk upgrade && \ 4 | apk add --no-cache ca-certificates git mercurial 5 | 6 | ARG PROJECT_NAME=redis-cluster-operator 7 | ARG REPO_PATH=github.com/ucloud/$PROJECT_NAME 8 | ARG BUILD_PATH=${REPO_PATH}/cmd/manager 9 | 10 | # Build version and commit should be passed in when performing docker build 11 | ARG VERSION=0.1.1 12 | ARG GIT_SHA=0000000 13 | 14 | WORKDIR /src 15 | 16 | COPY go.mod go.sum ./ 17 | RUN go mod download 18 | 19 | COPY pkg ./ cmd ./ version ./ 20 | 21 | RUN GOOS=linux GOARCH=amd64 
CGO_ENABLED=0 go build -o ${GOBIN}/${PROJECT_NAME} \ 22 | -ldflags "-X ${REPO_PATH}/version.Version=${VERSION} -X ${REPO_PATH}/version.GitSHA=${GIT_SHA}" \ 23 | $BUILD_PATH 24 | 25 | # ============================================================================= 26 | FROM alpine:3.9 AS final 27 | 28 | ARG PROJECT_NAME=redis-cluster-operator 29 | 30 | COPY --from=go-builder ${GOBIN}/${PROJECT_NAME} /usr/local/bin/${PROJECT_NAME} 31 | 32 | RUN adduser -D ${PROJECT_NAME} 33 | USER ${PROJECT_NAME} 34 | 35 | ENTRYPOINT ["/usr/local/bin/redis-cluster-operator"] 36 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL=/bin/bash -o pipefail 2 | 3 | PROJECT_NAME=redis-cluster-operator 4 | REPO=ucloud/$(PROJECT_NAME) 5 | 6 | # replace with your public registry 7 | ALTREPO=$(DOCKER_REGISTRY)/$(PROJECT_NAME) 8 | E2EALTREPO=$(DOCKER_REGISTRY)/$(PROJECT_NAME)-e2e 9 | 10 | VERSION=$(shell git describe --always --tags --dirty | sed "s/\(.*\)-g`git rev-parse --short HEAD`/\1/") 11 | GIT_SHA=$(shell git rev-parse --short HEAD) 12 | BIN_DIR=build/bin 13 | .PHONY: all build check clean test login build-e2e push push-e2e build-tools 14 | 15 | all: check build 16 | 17 | build: test build-go build-image 18 | 19 | build-go: 20 | GO111MODULE=on CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build \ 21 | -ldflags "-X github.com/$(REPO)/version.Version=$(VERSION) -X github.com/$(REPO)/version.GitSHA=$(GIT_SHA)" \ 22 | -o $(BIN_DIR)/$(PROJECT_NAME)-linux-amd64 cmd/manager/main.go 23 | GO111MODULE=on CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build \ 24 | -ldflags "-X github.com/$(REPO)/version.Version=$(VERSION) -X github.com/$(REPO)/version.GitSHA=$(GIT_SHA)" \ 25 | -o $(BIN_DIR)/$(PROJECT_NAME)-darwin-amd64 cmd/manager/main.go 26 | 27 | build-image: 28 | docker build --build-arg VERSION=$(VERSION) --build-arg GIT_SHA=$(GIT_SHA) -t $(ALTREPO):$(VERSION) . 
29 | docker tag $(ALTREPO):$(VERSION) $(ALTREPO):latest 30 | 31 | build-e2e: 32 | docker build -t $(E2EALTREPO):$(VERSION) -f test/e2e/Dockerfile . 33 | 34 | build-tools: 35 | bash hack/docker/redis-tools/make.sh build 36 | 37 | test: 38 | GO111MODULE=on go test $$(go list ./... | grep -v /vendor/) -race -coverprofile=coverage.txt -covermode=atomic 39 | 40 | login: 41 | @docker login -u "$(DOCKER_USER)" -p "$(DOCKER_PASS)" 42 | 43 | push: build-image 44 | docker push $(ALTREPO):$(VERSION) 45 | docker push $(ALTREPO):latest 46 | 47 | push-e2e: build-e2e 48 | docker push $(E2EALTREPO):$(VERSION) 49 | 50 | clean: 51 | rm -f $(BIN_DIR)/$(PROJECT_NAME)* 52 | 53 | check: check-format 54 | 55 | check-format: 56 | @test -z "$$(gofmt -s -l . 2>&1 | grep -v -e vendor/ | tee /dev/stderr)" 57 | -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.access.redhat.com/ubi7/ubi-minimal:latest 2 | 3 | ENV OPERATOR=/usr/local/bin/redis-cluster-operator \ 4 | USER_UID=1001 \ 5 | USER_NAME=redis-cluster-operator 6 | 7 | # install operator binary 8 | COPY build/_output/bin/redis-cluster-operator ${OPERATOR} 9 | 10 | COPY build/bin /usr/local/bin 11 | RUN /usr/local/bin/user_setup 12 | 13 | ENTRYPOINT ["/usr/local/bin/entrypoint"] 14 | 15 | USER ${USER_UID} 16 | -------------------------------------------------------------------------------- /build/bin/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | # This is documented here: 4 | # https://docs.openshift.com/container-platform/3.11/creating_images/guidelines.html#openshift-specific-guidelines 5 | 6 | if ! 
whoami &>/dev/null; then 7 | if [ -w /etc/passwd ]; then 8 | echo "${USER_NAME:-redis-cluster-operator}:x:$(id -u):$(id -g):${USER_NAME:-redis-cluster-operator} user:${HOME}:/sbin/nologin" >> /etc/passwd 9 | fi 10 | fi 11 | 12 | exec ${OPERATOR} $@ 13 | -------------------------------------------------------------------------------- /build/bin/user_setup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | # ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) 5 | mkdir -p ${HOME} 6 | chown ${USER_UID}:0 ${HOME} 7 | chmod ug+rwx ${HOME} 8 | 9 | # runtime user will need to be able to self-insert in /etc/passwd 10 | chmod g+rw /etc/passwd 11 | 12 | # no need for this script to remain in the image after running 13 | rm $0 14 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: redis-cluster-operator 3 | description: A Helm chart for Redis cluster operator deployment 4 | 5 | type: application 6 | 7 | version: 0.1.0 8 | 9 | appVersion: 0.1.0 10 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/crds/redis.kun_distributedredisclusters_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: distributedredisclusters.redis.kun 5 | spec: 6 | group: redis.kun 7 | names: 8 | kind: DistributedRedisCluster 9 | listKind: DistributedRedisClusterList 10 | plural: distributedredisclusters 11 | singular: distributedrediscluster 12 | shortNames: 13 | - drc 14 | scope: Namespaced 15 | additionalPrinterColumns: 16 | - JSONPath: .spec.masterSize 17 | description: The number of redis master node in the ensemble 18 | name: MasterSize 19 | type: integer 20 | - JSONPath: .status.status 21 | description: The status of redis cluster 22 | name: Status 23 | type: string 24 | - JSONPath: .metadata.creationTimestamp 25 | name: Age 26 | type: date 27 | - JSONPath: .status.numberOfMaster 28 | priority: 1 29 | description: The current master number of redis cluster 30 | name: CurrentMasters 31 | type: integer 32 | - JSONPath: .spec.image 33 | priority: 1 34 | description: The image of redis cluster 35 | name: Images 36 | type: string 37 | subresources: 38 | status: 
{} 39 | validation: 40 | openAPIV3Schema: 41 | description: DistributedRedisCluster is the Schema for the distributedredisclusters 42 | API 43 | properties: 44 | apiVersion: 45 | description: 'APIVersion defines the versioned schema of this representation 46 | of an object. Servers should convert recognized schemas to the latest 47 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' 48 | type: string 49 | kind: 50 | description: 'Kind is a string value representing the REST resource this 51 | object represents. Servers may infer this from the endpoint the client 52 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | description: DistributedRedisClusterSpec defines the desired state of 58 | DistributedRedisCluster 59 | properties: 60 | masterSize: 61 | format: int32 62 | type: integer 63 | minimum: 3 64 | maximum: 10 65 | clusterReplicas: 66 | format: int32 67 | type: integer 68 | minimum: 1 69 | maximum: 3 70 | serviceName: 71 | type: string 72 | pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' 73 | type: object 74 | status: 75 | description: DistributedRedisClusterStatus defines the observed state 76 | of DistributedRedisCluster 77 | type: object 78 | type: object 79 | version: v1alpha1 80 | versions: 81 | - name: v1alpha1 82 | served: true 83 | storage: true 84 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/crds/redis.kun_redisclusterbackups_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: redisclusterbackups.redis.kun 5 | spec: 6 | group: redis.kun 7 | names: 8 | kind: 
RedisClusterBackup 9 | listKind: RedisClusterBackupList 10 | plural: redisclusterbackups 11 | singular: redisclusterbackup 12 | shortNames: 13 | - drcb 14 | scope: Namespaced 15 | additionalPrinterColumns: 16 | - JSONPath: .metadata.creationTimestamp 17 | name: Age 18 | type: date 19 | - JSONPath: .status.phase 20 | description: The phase of redis cluster backup 21 | name: Phase 22 | type: string 23 | subresources: 24 | status: {} 25 | versions: 26 | - name: v1alpha1 27 | # Each version can be enabled/disabled by Served flag. 28 | served: true 29 | # One and only one version must be marked as the storage version. 30 | storage: true 31 | validation: 32 | openAPIV3Schema: 33 | description: RedisClusterBackup is the Schema for the redisclusterbackups 34 | API 35 | properties: 36 | apiVersion: 37 | description: 'APIVersion defines the versioned schema of this representation 38 | of an object. Servers should convert recognized schemas to the latest 39 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' 40 | type: string 41 | kind: 42 | description: 'Kind is a string value representing the REST resource this 43 | object represents. Servers may infer this from the endpoint the client 44 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' 45 | type: string 46 | metadata: 47 | type: object 48 | spec: 49 | description: RedisClusterBackupSpec defines the desired state of RedisClusterBackup 50 | type: object 51 | status: 52 | description: RedisClusterBackupStatus defines the observed state of RedisClusterBackup 53 | type: object 54 | type: object 55 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "redis-cluster-operator.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 7 | {{- end }} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "redis-cluster-operator.fullname" -}} 15 | {{- if .Values.fullnameOverride }} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 17 | {{- else }} 18 | {{- $name := default .Chart.Name .Values.nameOverride }} 19 | {{- if contains $name .Release.Name }} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 21 | {{- else }} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 23 | {{- end }} 24 | {{- end }} 25 | {{- end }} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "redis-cluster-operator.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 32 | {{- end }} 33 | 34 | {{/* 35 | Common labels 36 | */}} 37 | {{- define "redis-cluster-operator.labels" -}} 38 | helm.sh/chart: {{ include "redis-cluster-operator.chart" . }} 39 | {{ include "redis-cluster-operator.selectorLabels" . }} 40 | {{- if .Chart.AppVersion }} 41 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 42 | {{- end }} 43 | app.kubernetes.io/managed-by: {{ .Release.Service }} 44 | {{- end }} 45 | 46 | {{/* 47 | Selector labels 48 | */}} 49 | {{- define "redis-cluster-operator.selectorLabels" -}} 50 | app.kubernetes.io/name: {{ include "redis-cluster-operator.name" . }} 51 | app.kubernetes.io/instance: {{ .Release.Name }} 52 | {{- end }} 53 | 54 | {{/* 55 | Create the name of the service account to use 56 | */}} 57 | {{- define "redis-cluster-operator.serviceAccountName" -}} 58 | {{- if .Values.serviceAccount.create }} 59 | {{- default (include "redis-cluster-operator.fullname" .) 
.Values.serviceAccount.name }} 60 | {{- else }} 61 | {{- default "default" .Values.serviceAccount.name }} 62 | {{- end }} 63 | {{- end }} 64 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/templates/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ .Values.operator.name }} 5 | spec: 6 | replicas: {{ .Values.operator.replicas }} 7 | selector: 8 | matchLabels: 9 | name: {{ .Values.operator.name }} 10 | template: 11 | metadata: 12 | labels: 13 | name: {{ .Values.operator.name }} 14 | spec: 15 | serviceAccountName: {{ .Values.operator.service_account_name }} 16 | securityContext: 17 | {{- .Values.operator.podsecurityContext | toYaml | nindent 8 }} 18 | containers: 19 | - name: {{ .Values.operator.name }} 20 | # Replace this with the built image name 21 | image: {{ .Values.operator.image_source }}:{{ .Values.operator.image_tag }} 22 | securityContext: 23 | {{- .Values.operator.containersecurityContext | toYaml | nindent 12 }} 24 | command: 25 | - redis-cluster-operator 26 | args: 27 | - --rename-command-path=/etc/redisconf 28 | - --rename-command-file=redis.conf 29 | imagePullPolicy: {{ .Values.operator.imagePullPolicy }} 30 | resources: 31 | {{- .Values.operator.resources | toYaml | nindent 12 }} 32 | env: 33 | - name: WATCH_NAMESPACE 34 | value: {{ .Values.operator.namespace | quote }} 35 | - name: POD_NAME 36 | valueFrom: 37 | fieldRef: 38 | fieldPath: metadata.name 39 | - name: OPERATOR_NAME 40 | value: {{ .Values.operator.name | quote }} 41 | volumeMounts: 42 | - name: redisconf 43 | mountPath: /etc/redisconf 44 | volumes: 45 | - name: redisconf 46 | configMap: 47 | name: redis-admin 48 | --- 49 | apiVersion: v1 50 | kind: ConfigMap 51 | metadata: 52 | name: redis-admin 53 | data: 54 | redis.conf: |- 55 | {{- .Values.data.redis_conf | nindent 4 }} 56 | 
-------------------------------------------------------------------------------- /charts/redis-cluster-operator/templates/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | {{- if eq (len .Values.operator.namespace) 0 }} 3 | kind: ClusterRole 4 | {{- else }} 5 | kind: Role 6 | {{- end }} 7 | metadata: 8 | name: redis-cluster-operator 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - pods 14 | - secrets 15 | - endpoints 16 | - persistentvolumeclaims 17 | verbs: 18 | - get 19 | - list 20 | - watch 21 | - delete 22 | - apiGroups: 23 | - "" 24 | resources: 25 | - configmaps 26 | - pods/exec 27 | - secrets 28 | - services 29 | - events 30 | - persistentvolumeclaims 31 | verbs: 32 | - create 33 | - get 34 | - list 35 | - patch 36 | - update 37 | - watch 38 | - delete 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - namespaces 43 | verbs: 44 | - get 45 | - list 46 | - watch 47 | - apiGroups: 48 | - batch 49 | resources: 50 | - jobs 51 | verbs: 52 | - create 53 | - get 54 | - list 55 | - patch 56 | - update 57 | - watch 58 | - delete 59 | - apiGroups: 60 | - apps 61 | resources: 62 | - deployments 63 | - replicasets 64 | - statefulsets 65 | verbs: 66 | - create 67 | - get 68 | - list 69 | - patch 70 | - update 71 | - watch 72 | - delete 73 | - apiGroups: 74 | - policy 75 | resources: 76 | - poddisruptionbudgets 77 | verbs: 78 | - create 79 | - get 80 | - list 81 | - patch 82 | - update 83 | - watch 84 | - delete 85 | - apiGroups: 86 | - apps 87 | resourceNames: 88 | - redis-operator 89 | resources: 90 | - deployments/finalizers 91 | verbs: 92 | - update 93 | - apiGroups: 94 | - redis.kun 95 | resources: 96 | - '*' 97 | - redisclusterbackups 98 | verbs: 99 | - delete 100 | - deletecollection 101 | - get 102 | - list 103 | - patch 104 | - update 105 | - watch 106 | -------------------------------------------------------------------------------- 
/charts/redis-cluster-operator/templates/role_binding.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq (len .Values.operator.namespace) 0 }} 2 | kind: ClusterRoleBinding 3 | {{- else }} 4 | kind: RoleBinding 5 | {{- end }} 6 | apiVersion: rbac.authorization.k8s.io/v1 7 | metadata: 8 | name: redis-cluster-operator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: redis-cluster-operator 12 | namespace: {{.Release.Namespace}} 13 | roleRef: 14 | kind: {{ ternary "ClusterRole" "Role" (eq (len .Values.operator.namespace) 0) }} 15 | name: redis-cluster-operator 16 | apiGroup: rbac.authorization.k8s.io 17 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/templates/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: redis-cluster-operator 5 | namespace: {{.Release.Namespace}} 6 | -------------------------------------------------------------------------------- /charts/redis-cluster-operator/values.yaml: -------------------------------------------------------------------------------- 1 | operator: 2 | backup_enabled: "false" 3 | name: "redis-cluster-operator" 4 | replicas: 1 5 | service_account_name: "redis-cluster-operator" 6 | namespace: "" # keep this value "" if you want to deploy cluster-wide operator 7 | image_source: "fishu/redis-cluster-operator" 8 | image_tag: "latest" 9 | imagePullPolicy: "Always" 10 | resources: 11 | limits: 12 | cpu: 200m 13 | memory: 256Mi 14 | requests: 15 | cpu: 100m 16 | memory: 100Mi 17 | podsecurityContext: 18 | runAsUser: 1100 19 | runAsGroup: 1100 20 | fsGroup: 1100 21 | supplementalGroups: [1100] 22 | containersecurityContext: 23 | allowPrivilegeEscalation: false 24 | capabilities: 25 | drop: 26 | - ALL 27 | 28 | data: 29 | redis_conf: |- 30 | rename-command CONFIG lni07z1p 31 | rename-command BGSAVE pp14qluk 32 | rename-command DEBUG 8a4insyv 33 | rename-command
SAVE 6on30p6z 34 | rename-command SHUTDOWN dvui0opr 35 | rename-command SLAVEOF xwxvcw36 36 | rename-command BGREWRITEAOF www07fko 37 | -------------------------------------------------------------------------------- /deploy/cluster/cluster_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: redis-cluster-operator 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - pods 10 | - secrets 11 | - endpoints 12 | - persistentvolumeclaims 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - delete 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - configmaps 22 | - pods/exec 23 | - secrets 24 | - services 25 | - events 26 | - persistentvolumeclaims 27 | verbs: 28 | - create 29 | - get 30 | - list 31 | - patch 32 | - update 33 | - watch 34 | - delete 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - namespaces 39 | verbs: 40 | - get 41 | - list 42 | - watch 43 | - apiGroups: 44 | - batch 45 | resources: 46 | - jobs 47 | verbs: 48 | - create 49 | - get 50 | - list 51 | - patch 52 | - update 53 | - watch 54 | - delete 55 | - apiGroups: 56 | - apps 57 | resources: 58 | - deployments 59 | - replicasets 60 | - statefulsets 61 | verbs: 62 | - create 63 | - get 64 | - list 65 | - patch 66 | - update 67 | - watch 68 | - delete 69 | - apiGroups: 70 | - policy 71 | resources: 72 | - poddisruptionbudgets 73 | verbs: 74 | - create 75 | - get 76 | - list 77 | - patch 78 | - update 79 | - watch 80 | - delete 81 | - apiGroups: 82 | - apps 83 | resourceNames: 84 | - redis-operator 85 | resources: 86 | - deployments/finalizers 87 | verbs: 88 | - update 89 | - apiGroups: 90 | - redis.kun 91 | resources: 92 | - '*' 93 | - redisclusterbackups 94 | verbs: 95 | - delete 96 | - deletecollection 97 | - get 98 | - list 99 | - patch 100 | - update 101 | - watch -------------------------------------------------------------------------------- 
/deploy/cluster/cluster_role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: redis-cluster-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: redis-cluster-operator 8 | namespace: default 9 | roleRef: 10 | kind: ClusterRole 11 | name: redis-cluster-operator 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /deploy/cluster/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-cluster-operator 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | name: redis-cluster-operator 10 | template: 11 | metadata: 12 | labels: 13 | name: redis-cluster-operator 14 | spec: 15 | serviceAccountName: redis-cluster-operator 16 | containers: 17 | - name: redis-cluster-operator 18 | # Replace this with the built image name 19 | image: fishu/redis-cluster-operator:latest 20 | command: 21 | - redis-cluster-operator 22 | args: 23 | - --rename-command-path=/etc/redisconf 24 | - --rename-command-file=redis.conf 25 | imagePullPolicy: Always 26 | env: 27 | - name: WATCH_NAMESPACE 28 | value: "" 29 | - name: POD_NAME 30 | valueFrom: 31 | fieldRef: 32 | fieldPath: metadata.name 33 | - name: OPERATOR_NAME 34 | value: "redis-cluster-operator" 35 | volumeMounts: 36 | - name: redisconf 37 | mountPath: /etc/redisconf 38 | volumes: 39 | - name: redisconf 40 | configMap: 41 | name: redis-admin 42 | --- 43 | apiVersion: v1 44 | kind: ConfigMap 45 | metadata: 46 | name: redis-admin 47 | data: 48 | redis.conf: |- 49 | rename-command CONFIG lni07z1p 50 | rename-command BGSAVE pp14qluk 51 | rename-command DEBUG 8a4insyv 52 | rename-command SAVE 6on30p6z 53 | rename-command SHUTDOWN dvui0opr 54 | rename-command SLAVEOF xwxvcw36 55 | rename-command 
BGREWRITEAOF www07fko 56 | -------------------------------------------------------------------------------- /deploy/crds/redis.kun_distributedredisclusters_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: distributedredisclusters.redis.kun 5 | spec: 6 | group: redis.kun 7 | names: 8 | kind: DistributedRedisCluster 9 | listKind: DistributedRedisClusterList 10 | plural: distributedredisclusters 11 | singular: distributedrediscluster 12 | shortNames: 13 | - drc 14 | scope: Namespaced 15 | additionalPrinterColumns: 16 | - JSONPath: .spec.masterSize 17 | description: The number of redis master node in the ensemble 18 | name: MasterSize 19 | type: integer 20 | - JSONPath: .status.status 21 | description: The status of redis cluster 22 | name: Status 23 | type: string 24 | - JSONPath: .metadata.creationTimestamp 25 | name: Age 26 | type: date 27 | - JSONPath: .status.numberOfMaster 28 | priority: 1 29 | description: The current master number of redis cluster 30 | name: CurrentMasters 31 | type: integer 32 | - JSONPath: .spec.image 33 | priority: 1 34 | description: The image of redis cluster 35 | name: Images 36 | type: string 37 | subresources: 38 | status: {} 39 | validation: 40 | openAPIV3Schema: 41 | description: DistributedRedisCluster is the Schema for the distributedredisclusters 42 | API 43 | properties: 44 | apiVersion: 45 | description: 'APIVersion defines the versioned schema of this representation 46 | of an object. Servers should convert recognized schemas to the latest 47 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' 48 | type: string 49 | kind: 50 | description: 'Kind is a string value representing the REST resource this 51 | object represents. 
Servers may infer this from the endpoint the client 52 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | description: DistributedRedisClusterSpec defines the desired state of 58 | DistributedRedisCluster 59 | properties: 60 | masterSize: 61 | format: int32 62 | type: integer 63 | minimum: 3 64 | maximum: 10 65 | clusterReplicas: 66 | format: int32 67 | type: integer 68 | minimum: 1 69 | maximum: 3 70 | serviceName: 71 | type: string 72 | pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*' 73 | type: object 74 | status: 75 | description: DistributedRedisClusterStatus defines the observed state 76 | of DistributedRedisCluster 77 | type: object 78 | type: object 79 | version: v1alpha1 80 | versions: 81 | - name: v1alpha1 82 | served: true 83 | storage: true 84 | -------------------------------------------------------------------------------- /deploy/crds/redis.kun_redisclusterbackups_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: redisclusterbackups.redis.kun 5 | spec: 6 | group: redis.kun 7 | names: 8 | kind: RedisClusterBackup 9 | listKind: RedisClusterBackupList 10 | plural: redisclusterbackups 11 | singular: redisclusterbackup 12 | shortNames: 13 | - drcb 14 | scope: Namespaced 15 | additionalPrinterColumns: 16 | - JSONPath: .metadata.creationTimestamp 17 | name: Age 18 | type: date 19 | - JSONPath: .status.phase 20 | description: The phase of redis cluster backup 21 | name: Phase 22 | type: string 23 | subresources: 24 | status: {} 25 | versions: 26 | - name: v1alpha1 27 | # Each version can be enabled/disabled by Served flag. 28 | served: true 29 | # One and only one version must be marked as the storage version. 
30 | storage: true 31 | validation: 32 | openAPIV3Schema: 33 | description: RedisClusterBackup is the Schema for the redisclusterbackups 34 | API 35 | properties: 36 | apiVersion: 37 | description: 'APIVersion defines the versioned schema of this representation 38 | of an object. Servers should convert recognized schemas to the latest 39 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' 40 | type: string 41 | kind: 42 | description: 'Kind is a string value representing the REST resource this 43 | object represents. Servers may infer this from the endpoint the client 44 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' 45 | type: string 46 | metadata: 47 | type: object 48 | spec: 49 | description: RedisClusterBackupSpec defines the desired state of RedisClusterBackup 50 | type: object 51 | status: 52 | description: RedisClusterBackupStatus defines the observed state of RedisClusterBackup 53 | type: object 54 | type: object 55 | -------------------------------------------------------------------------------- /deploy/e2e.yml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | generateName: drc-e2e- 5 | namespace: default 6 | labels: 7 | app: drc-e2e 8 | spec: 9 | backoffLimit: 0 10 | template: 11 | metadata: 12 | name: drc-e2e 13 | spec: 14 | serviceAccountName: e2e-sa 15 | restartPolicy: Never 16 | containers: 17 | - name: e2e 18 | image: fishu/redis-cluster-operator-e2e 19 | imagePullPolicy: Always 20 | env: 21 | - name: TEST_TIMEOUT 22 | value: "60m" 23 | - name: STORAGECLASSNAME 24 | value: "csi-rbd-sc" 25 | - name: AWS_ACCESS_KEY_ID 26 | value: "" 27 | - name: AWS_SECRET_ACCESS_KEY 28 | value: "" 29 | - name: S3_ENDPOINT 30 | value: "" 31 | - name: S3_BUCKET 32 | value: "" 33 | - name: 
CLUSTER_DOMAIN 34 | value: "" 35 | 36 | --- 37 | apiVersion: rbac.authorization.k8s.io/v1 38 | kind: ClusterRole 39 | metadata: 40 | name: e2e-cluster-role 41 | rules: 42 | - apiGroups: 43 | - "" 44 | resources: 45 | - endpoints 46 | - pods 47 | - configmaps 48 | - secrets 49 | - services 50 | - events 51 | - persistentvolumeclaims 52 | - namespaces 53 | verbs: 54 | - create 55 | - get 56 | - list 57 | - patch 58 | - update 59 | - watch 60 | - delete 61 | - apiGroups: 62 | - batch 63 | resources: 64 | - jobs 65 | verbs: 66 | - create 67 | - get 68 | - list 69 | - patch 70 | - update 71 | - watch 72 | - delete 73 | - apiGroups: 74 | - rbac.authorization.k8s.io 75 | resources: 76 | - roles 77 | - clusterroles 78 | - rolebindings 79 | - clusterrolebindings 80 | verbs: 81 | - create 82 | - get 83 | - list 84 | - delete 85 | - apiGroups: 86 | - apps 87 | resources: 88 | - deployments 89 | - replicasets 90 | - statefulsets 91 | verbs: 92 | - create 93 | - get 94 | - list 95 | - patch 96 | - update 97 | - watch 98 | - delete 99 | - apiGroups: 100 | - policy 101 | resources: 102 | - poddisruptionbudgets 103 | verbs: 104 | - create 105 | - get 106 | - list 107 | - patch 108 | - update 109 | - watch 110 | - delete 111 | - apiGroups: 112 | - apps 113 | resourceNames: 114 | - redis-operator 115 | resources: 116 | - deployments/finalizers 117 | verbs: 118 | - update 119 | - apiGroups: 120 | - redis.kun 121 | resources: 122 | - '*' 123 | verbs: 124 | - create 125 | - delete 126 | - deletecollection 127 | - get 128 | - list 129 | - patch 130 | - update 131 | - watch 132 | --- 133 | kind: ClusterRoleBinding 134 | apiVersion: rbac.authorization.k8s.io/v1 135 | metadata: 136 | name: e2e-cluster-role~default 137 | subjects: 138 | - kind: ServiceAccount 139 | name: e2e-sa 140 | namespace: default 141 | roleRef: 142 | kind: ClusterRole 143 | name: e2e-cluster-role 144 | apiGroup: rbac.authorization.k8s.io 145 | --- 146 | apiVersion: v1 147 | kind: ServiceAccount 148 | metadata: 149 | 
name: e2e-sa 150 | namespace: default 151 | -------------------------------------------------------------------------------- /deploy/example/backup-restore/redisclusterbackup_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | AWS_ACCESS_KEY_ID: dGVzdA== 4 | AWS_SECRET_ACCESS_KEY: dGVzdA== 5 | kind: Secret 6 | metadata: 7 | name: s3-secret 8 | type: Opaque 9 | --- 10 | apiVersion: redis.kun/v1alpha1 11 | kind: RedisClusterBackup 12 | metadata: 13 | annotations: 14 | # if your operator run as cluster-scoped, add this annotations 15 | redis.kun/scope: cluster-scoped 16 | name: example-redisclusterbackup 17 | spec: 18 | image: redis-tools:5.0.4 19 | redisClusterName: example-distributedrediscluster 20 | storageSecretName: s3-secret 21 | # Replace this with the s3 info 22 | s3: 23 | endpoint: REPLACE_ENDPOINT 24 | bucket: REPLACE_BUCKET 25 | -------------------------------------------------------------------------------- /deploy/example/backup-restore/redisclusterbackup_topvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: test-backup 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | resources: 9 | requests: 10 | storage: 10Gi 11 | storageClassName: {storageClassName} 12 | volumeMode: Filesystem 13 | --- 14 | 15 | apiVersion: redis.kun/v1alpha1 16 | kind: RedisClusterBackup 17 | metadata: 18 | name: example-redisclusterbackup 19 | annotations: 20 | redis.kun/scope: cluster-scoped 21 | spec: 22 | image: uhub.service.ucloud.cn/operator/redis-tools:5.0.4 23 | # on same namespace 24 | redisClusterName: test 25 | local: 26 | mountPath: /back 27 | persistentVolumeClaim: 28 | claimName: test-backup 29 | -------------------------------------------------------------------------------- /deploy/example/backup-restore/restore.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-restore 8 | spec: 9 | init: 10 | backupSource: 11 | name: example-redisclusterbackup 12 | namespace: default 13 | -------------------------------------------------------------------------------- /deploy/example/backup-restore/restore_frompvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | name: restore 5 | spec: 6 | clusterReplicas: 1 7 | config: 8 | appendfsync: everysec 9 | appendonly: "yes" 10 | auto-aof-rewrite-min-size: 64mb 11 | auto-aof-rewrite-percentage: "100" 12 | cluster-node-timeout: "5000" 13 | loglevel: verbose 14 | maxclients: "1000" 15 | maxmemory: "0" 16 | notify-keyspace-events: "" 17 | rdbcompression: "yes" 18 | save: 900 1 300 10 19 | stop-writes-on-bgsave-error: "yes" 20 | tcp-keepalive: "0" 21 | timeout: "0" 22 | image: redis:5.0.4-alpine 23 | masterSize: 3 24 | resources: 25 | limits: 26 | cpu: 400m 27 | memory: 300Mi 28 | requests: 29 | cpu: 400m 30 | memory: 300Mi 31 | storage: 32 | class: {storageClassName} 33 | size: 10Gi 34 | type: persistent-claim 35 | init: 36 | backupSource: 37 | name: example-redisclusterbackup 38 | namespace: default 39 | -------------------------------------------------------------------------------- /deploy/example/custom-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-distributedrediscluster 8 | spec: 9 | image: redis:5.0.4-alpine 
10 | masterSize: 3 11 | clusterReplicas: 1 12 | config: 13 | activerehashing: "yes" 14 | appendfsync: everysec 15 | appendonly: "yes" 16 | hash-max-ziplist-entries: "512" 17 | hash-max-ziplist-value: "64" 18 | hll-sparse-max-bytes: "3000" 19 | list-compress-depth: "0" 20 | maxmemory-policy: noeviction 21 | maxmemory-samples: "5" 22 | no-appendfsync-on-rewrite: "no" 23 | notify-keyspace-events: "" 24 | set-max-intset-entries: "512" 25 | slowlog-log-slower-than: "10000" 26 | slowlog-max-len: "128" 27 | stop-writes-on-bgsave-error: "yes" 28 | tcp-keepalive: "0" 29 | timeout: "0" 30 | zset-max-ziplist-entries: "128" 31 | zset-max-ziplist-value: "64" 32 | -------------------------------------------------------------------------------- /deploy/example/custom-password.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: mysecret 8 | type: Opaque 9 | data: 10 | password: MWYyZDFlMmU2N2Rm 11 | --- 12 | apiVersion: redis.kun/v1alpha1 13 | kind: DistributedRedisCluster 14 | metadata: 15 | name: example-distributedrediscluster 16 | spec: 17 | image: redis:5.0.4-alpine 18 | masterSize: 3 19 | clusterReplicas: 1 20 | passwordSecret: 21 | name: mysecret 22 | resources: 23 | limits: 24 | cpu: 200m 25 | memory: 200Mi 26 | requests: 27 | cpu: 200m 28 | memory: 100Mi 29 | -------------------------------------------------------------------------------- /deploy/example/custom-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-distributedrediscluster 8 | spec: 9 | image: redis:5.0.4-alpine 10 | masterSize: 3 11 | 
clusterReplicas: 1 12 | resources: 13 | limits: 14 | cpu: 200m 15 | memory: 200Mi 16 | requests: 17 | cpu: 200m 18 | memory: 100Mi -------------------------------------------------------------------------------- /deploy/example/custom-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-distributedrediscluster 8 | spec: 9 | image: redis:5.0.4-alpine 10 | masterSize: 3 11 | clusterReplicas: 1 12 | serviceName: redis-svc -------------------------------------------------------------------------------- /deploy/example/persistent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-distributedrediscluster 8 | spec: 9 | image: redis:5.0.4-alpine 10 | masterSize: 3 11 | clusterReplicas: 1 12 | storage: 13 | type: persistent-claim 14 | size: 1Gi 15 | class: csi-rbd-sc 16 | deleteClaim: true -------------------------------------------------------------------------------- /deploy/example/prometheus-exporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-distributedrediscluster 8 | spec: 9 | image: redis:5.0.4-alpine 10 | masterSize: 3 11 | clusterReplicas: 1 12 | monitor: 13 | image: oliver006/redis_exporter -------------------------------------------------------------------------------- 
/deploy/example/redis.kun_v1alpha1_distributedrediscluster_cr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-distributedrediscluster 8 | spec: 9 | # Add fields here 10 | masterSize: 3 11 | clusterReplicas: 1 12 | image: redis:5.0.4-alpine 13 | -------------------------------------------------------------------------------- /deploy/example/securitycontext.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: redis.kun/v1alpha1 2 | kind: DistributedRedisCluster 3 | metadata: 4 | annotations: 5 | # if your operator run as cluster-scoped, add this annotations 6 | redis.kun/scope: cluster-scoped 7 | name: example-distributedrediscluster 8 | spec: 9 | image: redis:5.0.4-alpine 10 | masterSize: 3 11 | clusterReplicas: 1 12 | securityContext: 13 | runAsUser: 1101 14 | runAsGroup: 1101 15 | fsGroup: 1101 16 | supplementalGroups: [1101] 17 | containerSecurityContext: 18 | allowPrivilegeEscalation: false 19 | capabilities: 20 | drop: 21 | - ALL 22 | -------------------------------------------------------------------------------- /deploy/namespace/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-cluster-operator 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | name: redis-cluster-operator 10 | template: 11 | metadata: 12 | labels: 13 | name: redis-cluster-operator 14 | spec: 15 | serviceAccountName: redis-cluster-operator 16 | containers: 17 | - name: redis-cluster-operator 18 | # Replace this with the built image name 19 | image: fishu/redis-cluster-operator:latest 20 | command: 21 | - redis-cluster-operator 22 | args: 23 | - 
--rename-command-path=/etc/redisconf 24 | - --rename-command-file=redis.conf 25 | imagePullPolicy: Always 26 | env: 27 | - name: WATCH_NAMESPACE 28 | valueFrom: 29 | fieldRef: 30 | fieldPath: metadata.namespace 31 | - name: POD_NAME 32 | valueFrom: 33 | fieldRef: 34 | fieldPath: metadata.name 35 | - name: OPERATOR_NAME 36 | value: "redis-cluster-operator" 37 | volumeMounts: 38 | - name: redisconf 39 | mountPath: /etc/redisconf 40 | volumes: 41 | - name: redisconf 42 | configMap: 43 | name: redis-admin 44 | --- 45 | apiVersion: v1 46 | kind: ConfigMap 47 | metadata: 48 | name: redis-admin 49 | data: 50 | redis.conf: |- 51 | rename-command CONFIG lni07z1p 52 | rename-command BGSAVE pp14qluk 53 | rename-command DEBUG 8a4insyv 54 | rename-command SAVE 6on30p6z 55 | rename-command SHUTDOWN dvui0opr 56 | rename-command SLAVEOF xwxvcw36 57 | rename-command BGREWRITEAOF www07fko 58 | -------------------------------------------------------------------------------- /deploy/namespace/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: redis-cluster-operator 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - pods 10 | - secrets 11 | - endpoints 12 | - persistentvolumeclaims 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - delete 18 | - apiGroups: 19 | - "" 20 | resources: 21 | - configmaps 22 | - pods/exec 23 | - secrets 24 | - services 25 | - events 26 | - persistentvolumeclaims 27 | verbs: 28 | - create 29 | - get 30 | - list 31 | - patch 32 | - update 33 | - watch 34 | - delete 35 | - apiGroups: 36 | - "" 37 | resources: 38 | - namespaces 39 | verbs: 40 | - get 41 | - list 42 | - watch 43 | - apiGroups: 44 | - batch 45 | resources: 46 | - jobs 47 | verbs: 48 | - create 49 | - get 50 | - list 51 | - patch 52 | - update 53 | - watch 54 | - delete 55 | - apiGroups: 56 | - apps 57 | resources: 58 | - deployments 59 | - replicasets 60 | - 
statefulsets 61 | verbs: 62 | - create 63 | - get 64 | - list 65 | - patch 66 | - update 67 | - watch 68 | - delete 69 | - apiGroups: 70 | - policy 71 | resources: 72 | - poddisruptionbudgets 73 | verbs: 74 | - create 75 | - get 76 | - list 77 | - patch 78 | - update 79 | - watch 80 | - delete 81 | - apiGroups: 82 | - apps 83 | resourceNames: 84 | - redis-operator 85 | resources: 86 | - deployments/finalizers 87 | verbs: 88 | - update 89 | - apiGroups: 90 | - redis.kun 91 | resources: 92 | - '*' 93 | - redisclusterbackups 94 | verbs: 95 | - delete 96 | - deletecollection 97 | - get 98 | - list 99 | - patch 100 | - update 101 | - watch -------------------------------------------------------------------------------- /deploy/namespace/role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: redis-cluster-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: redis-cluster-operator 8 | roleRef: 9 | kind: Role 10 | name: redis-cluster-operator 11 | apiGroup: rbac.authorization.k8s.io 12 | -------------------------------------------------------------------------------- /deploy/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: redis-cluster-operator 5 | -------------------------------------------------------------------------------- /doc/design/zh/cluster_backup_and_restore.md: -------------------------------------------------------------------------------- 1 | # 备份和恢复 2 | 3 | 目前只支持备份到 ceph S3对象存储及本地 pvc 中。 4 | 5 | 备份开始时使用 redis-cli 同步 Master 的 RDB到本地后再使用 [Rclone](https://rclone.org/) 将 6 | RDB 文件传输到对象存储或者 pvc 中,恢复时先使用 Rclone 从之前备份的位置同步备份到本地后,再启动 Redis 7 | 服务。备份恢复的工具类镜像中预置了 redis-cli 和 Rclone,参见 [Dockerfile](hack/docker/redis-tools/Dockerfile)。 8 | 9 | ## 备份 10 | 11 | Operator 会 Watch 集群所有的 RedisClusterBackup 
实例变化,当用户提交一个备份的 CR 之后,Operator 会: 12 | 13 | 1. 创建一个 Kubernetes batch job,根据 Redis 集群分布数,在 job 中注入相同数量的 container,每个 container 向一个 Master 发起备份请求,设置开始时间及备份状态。 14 | 2. 同步完成 RDB 文件后,将 Redis 集群每个分片的 RDB 文件和 cluster-config-file(记录节点slots信息) 上传到对象存储,同时将 CR 的状态置为 Succeeded,设置完成时间。redis集群备份的快照和节点元数据信息,上传到对象存储后,有统一的路径,当前的规则是:redis/{Namespace}/{RedisClusterName}/{StartTime}/{BackupName}-x 15 | 比如一个备份一个在 default 命名空间的名为 redis-cluster-test 的 Redis 集群(集群含有三个 master 节点),备份名为 backup , 备份开始时间为 20191101083020,最后会有如下对象存储路径: 16 | 17 | ``` 18 | redis/default/redis-cluster-test/20191101083020/backup-0 19 | redis/default/redis-cluster-test/20191101083020/backup-1 20 | redis/default/redis-cluster-test/20191101083020/backup-2 21 | ``` 22 | 23 | 每个master节点备份的快照和节点元数据信息会存储在上述路径,用户可以到相应的 bucket 中查看。 24 | 25 | ## 从备份恢复 26 | 27 | 从备份恢复和创建步骤不同,分为两阶段,第一阶段同步数据,从快照启动 Master 节点;第二阶段启动 Slave 节点。 28 | 29 | 1. 设置`DistributedRedisCluster.Status.Restore.Phase=Running`,根据备份信息,创建与备份集群切片数相同的 Statefulset, 30 | 设置 Replicas 为 1,只启动 master 节点,注入 init container,init container 的作用是拉取对象存储上的快照数据。 31 | 2. 等待第1步同步数据完成,master 启动完成后,设置`DistributedRedisCluster.Status.Restore.Phase=Restart`,移除 32 | init container 后等待节点重启。 33 | 3. 
第2步完成之后,增加每个分片的副本数调大 Statefulset 的 Replicas,拉起 Slave 节点,设置`DistributedRedisCluster.Status.Restore.Phase=Succeeded`, 34 | 等待所有 Pod 节点状态变为 Running 之后,设置每个 Statefulset 的 Slave 节点 replicate Master 节点,加入集群。 35 | -------------------------------------------------------------------------------- /doc/design/zh/cluster_create.md: -------------------------------------------------------------------------------- 1 | # 创建集群 2 | 3 | **需要注意的是,只有配置了持久化存储(PVC)的 CR 实例可以故障自愈** 4 | 5 | 如用户希望创建 3 分片的集群,每个分片一主一从: 6 | 7 | ``` 8 | apiVersion: redis.kun/v1alpha1 9 | kind: DistributedRedisCluster 10 | metadata: 11 | annotations: 12 | # if your operator run as cluster-scoped, add this annotations 13 | redis.kun/scope: cluster-scoped 14 | name: example-distributedrediscluster 15 | spec: 16 | masterSize: 3 # 三分片 17 | clusterReplicas: 1 # 每个主节点一个从节点 18 | image: redis:5.0.4-alpine 19 | storage: 20 | type: persistent-claim 21 | size: 1Gi 22 | class: {StorageClassName} 23 | deleteClaim: true # 删除 Redis Cluster 时,自动清理 pvc 24 | ``` 25 | 26 | Operator Watch 到新的 Redis Cluster CR 实例被创建时,Operator 会执行以下操作: 27 | 28 | 1. 为每个分片创建一个 Statefulset,每个 Statefulset Name 后缀以 0,1,2... 递增,设置 Statefulset Replicas 为副本数+1(Master),每个 Statefulset 代表着一个分片及其所有副本,所以将创建 3 个 Statefulset,每个 Statefulset 的 Replicas 为 3,每个 Pod 代表一个 Redis 实例。 29 | 2. 等待所有 Pod 状态变为 Ready 且每个节点相互识别后,Operator 会在每个 Statefulset 的 Pod 中挑选一个作为 Master 节点,其余节点为该 Master 的 Slave,并尽可能保证所有 Master 节点不在同一个 k8s node。 30 | 3. 为 Master 分配 Slots,将 Slave 加入集群,从而组建集群。 31 | 4. 
为每一个 Statefulset 创建一个 Headless Service,为整个集群创建一个 Service 指向所有的 pod。 32 | 33 | ## 亲和性和反亲和性 34 | 35 | 为保证 Redis Cluster 的高可用性,CRD 中设计了 `affinity` 及 `requiredAntiAffinity` (bool) 字段来做 Redis 节点间的打散: 36 | 37 | * 当 affinity 和 requiredAntiAffinity 都未设置时,Operator 默认设置 Statefulset 管理的一组 pod 及 所有 pod 尽量反亲和; 38 | * 当用户只设置 requiredAntiAffinity 字段的时,Operator 会设置 Statefulset 管理的一组 pod 强制反亲和,所有 pod 尽量反亲和; 39 | * 当用户设置了 affinity 时,Statefulset 直接继承 affinity,Operator 不做额外设置。 -------------------------------------------------------------------------------- /doc/design/zh/cluster_scaling.md: -------------------------------------------------------------------------------- 1 | # Redis Cluster 横向扩容和缩容 2 | 3 | ## 扩容 4 | 5 | 当 Operator Watch 到一个 Redis Cluster 需要扩容时,Operator 会: 6 | 7 | 1. 新建 Statefulset。 8 | 2. 等待新的 Pod 状态变为 Ready 且每个节点相互识别后从新的 Statefulset 选出 Master 节点,其余为 Slave。 9 | 3. Operator 调整 Slot,将其他 Statefulset 的 Master 节点的 Slots 调度到新的 Master,尽可能使其平均分布到所有的 Redis 节点上。 10 | 11 | 以扩容一个节点为例。 12 | numSlots(迁移节点数): 表示扩容时分配到新节点的 slot 数量 13 | numSlots=16384/集群节点。 14 | 集群现有的每个 Master 节点待迁移slot数计算公式为: 15 | 待迁移slot数量 * (该源节点负责的slot数量 / slot总数) 16 | 17 | 当前 Master Slots 分布: 18 | ``` 19 | Master[0] -> Slots 0 - 5460 slots=5461 20 | Master[1] -> Slots 5461 - 10922 slots=5462 21 | Master[2] -> Slots 10923 - 16383 slots=5461 22 | ``` 23 | 加入节点 Master[3],numSlots=16384/4=4096 24 | 那么分配到集群现有每个 Master 节点的待迁移 migratingSlots 数为: 25 | ``` 26 | Master[0] migratingSlots = 4096 * (5461 / 16384) = 1365.25=1365 27 | Master[1] migratingSlots = 4096 * (5462 / 16384) = 1365.5=1366 28 | Master[2] migratingSlots = 4096 * (5461 / 16384) = 1365.25=1365 29 | ``` 30 | 最终: 31 | ``` 32 | Master[0] -> Slots 1365-5460 slots=4096 33 | Master[1] -> Slots 6827-10922 slots=4096 34 | Master[2] -> Slots 12288-16383 slots=4096 35 | Master[3] -> Slots 0-1364 5461-6826 10923-12287 slots=4096 36 | ``` 37 | ## 缩容 38 | 39 | 当 Operator Watch 到一个 Redis Cluster 需要缩容时,Operator 会: 40 | 41 | 1. 
将待删除 Statefulset 的 Master 节点所有的 Slots 迁移到其他 Master。 42 | 2. 从后缀名最大的 Statefulset 的开始,将其所有的节点踢出集群。 43 | 3. 删除 Statefulset 及相关资源。 -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/ucloud/redis-cluster-operator 2 | 3 | require ( 4 | github.com/appscode/go v0.0.0-20191006073906-e3d193d493fc 5 | github.com/appscode/osm v0.12.0 6 | github.com/aws/aws-sdk-go v1.20.20 7 | github.com/go-logr/logr v0.1.0 8 | github.com/go-openapi/spec v0.19.2 9 | github.com/go-redis/redis v6.15.7+incompatible 10 | github.com/mediocregopher/radix.v2 v0.0.0-20181115013041-b67df6e626f9 11 | github.com/onsi/ginkgo v1.8.0 12 | github.com/onsi/gomega v1.5.0 13 | github.com/operator-framework/operator-sdk v0.13.0 14 | github.com/pkg/errors v0.8.1 15 | github.com/satori/go.uuid v1.2.0 16 | github.com/spf13/pflag v1.0.5 17 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e 18 | gomodules.xyz/stow v0.2.3 19 | k8s.io/api v0.0.0 20 | k8s.io/apimachinery v0.0.0 21 | k8s.io/client-go v12.0.0+incompatible 22 | k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d 23 | k8s.io/kubernetes v1.16.2 24 | kmodules.xyz/constants v0.0.0-20191024095500-cd4313df4aa6 25 | kmodules.xyz/objectstore-api v0.0.0-20191014210450-ac380fa650a3 26 | sigs.k8s.io/controller-runtime v0.4.0 27 | ) 28 | 29 | // Pinned to kubernetes-1.16.2 30 | replace ( 31 | k8s.io/api => k8s.io/api v0.0.0-20191016110408-35e52d86657a 32 | k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.0.0-20191016113550-5357c4baaf65 33 | k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20191004115801-a2eda9f80ab8 34 | k8s.io/apiserver => k8s.io/apiserver v0.0.0-20191016112112-5190913f932d 35 | k8s.io/cli-runtime => k8s.io/cli-runtime v0.0.0-20191016114015-74ad18325ed5 36 | k8s.io/client-go => k8s.io/client-go v0.0.0-20191016111102-bec269661e48 37 | k8s.io/cloud-provider => k8s.io/cloud-provider 
v0.0.0-20191016115326-20453efc2458 38 | k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.0.0-20191016115129-c07a134afb42 39 | k8s.io/code-generator => k8s.io/code-generator v0.0.0-20191004115455-8e001e5d1894 40 | k8s.io/component-base => k8s.io/component-base v0.0.0-20191016111319-039242c015a9 41 | k8s.io/cri-api => k8s.io/cri-api v0.0.0-20190828162817-608eb1dad4ac 42 | k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.0.0-20191016115521-756ffa5af0bd 43 | k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.0.0-20191016112429-9587704a8ad4 44 | k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.0.0-20191016114939-2b2b218dc1df 45 | k8s.io/kube-proxy => k8s.io/kube-proxy v0.0.0-20191016114407-2e83b6f20229 46 | k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.0.0-20191016114748-65049c67a58b 47 | k8s.io/kubectl => k8s.io/kubectl v0.0.0-20191016120415-2ed914427d51 48 | k8s.io/kubelet => k8s.io/kubelet v0.0.0-20191016114556-7841ed97f1b2 49 | k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.0.0-20191016115753-cf0698c3a16b 50 | k8s.io/metrics => k8s.io/metrics v0.0.0-20191016113814-3b1a734dba6e 51 | k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.0.0-20191016112829-06bb3c9d77c9 52 | ) 53 | 54 | replace ( 55 | github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.1+incompatible 56 | github.com/go-check/check => github.com/go-check/check v0.0.0-20180628173108-788fd7840127 57 | ) 58 | 59 | go 1.13 60 | -------------------------------------------------------------------------------- /hack/docker/redis-tools/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis:5.0.4 2 | 3 | RUN set -x \ 4 | && apt-get update \ 5 | && apt-get install -y --no-install-recommends \ 6 | ca-certificates \ 7 | netcat \ 8 | zip \ 9 | && rm -rf /var/lib/apt/lists/* /usr/share/doc /usr/share/man /tmp/* 10 | 11 | COPY rclone /usr/local/bin/rclone 12 | COPY redis-tools.sh 
/usr/local/bin/redis-tools.sh 13 | RUN chmod +x /usr/local/bin/redis-tools.sh 14 | 15 | ENTRYPOINT ["redis-tools.sh"] 16 | -------------------------------------------------------------------------------- /hack/docker/redis-tools/make.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ref: https://github.com/kubedb/mysql/blob/master/hack 4 | 5 | set -xeou pipefail 6 | 7 | GOPATH=$(go env GOPATH) 8 | REPO_ROOT=$GOPATH/src/github.com/ucloud/redis-cluster-operator 9 | 10 | source "$REPO_ROOT/hack/lib/lib.sh" 11 | source "$REPO_ROOT/hack/lib/image.sh" 12 | 13 | DOCKER_REGISTRY=${DOCKER_REGISTRY:-operator} 14 | 15 | IMG=redis-tools 16 | 17 | DB_VERSION=5.0.4 18 | TAG="$DB_VERSION" 19 | 20 | RCLONE_VER=${RCLONE_VER:-v1.50.2} 21 | 22 | DIST=$REPO_ROOT/dist 23 | mkdir -p $DIST 24 | 25 | build() { 26 | pushd "$REPO_ROOT/hack/docker/redis-tools" 27 | 28 | if [ ! -f "rclone" ]; then 29 | # Download rclone 30 | wget https://downloads.rclone.org/"${RCLONE_VER}"/rclone-"${RCLONE_VER}"-linux-amd64.zip 31 | unzip rclone-"${RCLONE_VER}"-linux-amd64.zip 32 | chmod +x rclone-"${RCLONE_VER}"-linux-amd64/rclone 33 | mv rclone-"${RCLONE_VER}"-linux-amd64/rclone rclone 34 | fi 35 | 36 | local cmd="docker build --pull -t $DOCKER_REGISTRY/$IMG:$TAG ." 
37 | echo $cmd; $cmd 38 | 39 | rm -rf rclone-"${RCLONE_VER}"-linux-amd64* 40 | rm rclone 41 | popd 42 | } 43 | 44 | # shellcheck disable=SC2068 45 | binary_repo $@ 46 | -------------------------------------------------------------------------------- /hack/docker/redis-tools/redis-tools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eou pipefail 3 | 4 | # ref: https://github.com/kubedb/mysql/blob/master/hack/docker/mysql-tools/5.7.25/mysql-tools.sh 5 | 6 | show_help() { 7 | echo "redis-tools.sh - run tools" 8 | echo " " 9 | echo "redis-tools.sh COMMAND [options]" 10 | echo " " 11 | echo "options:" 12 | echo "-h, --help show brief help" 13 | echo " --data-dir=DIR path to directory holding db data (default: /var/data)" 14 | echo " --host=HOST database host" 15 | echo " --user=USERNAME database username" 16 | echo " --bucket=BUCKET name of bucket" 17 | echo " --location=LOCATION location of backend (:)" 18 | echo " --folder=FOLDER name of folder in bucket" 19 | echo " --snapshot=SNAPSHOT name of snapshot" 20 | } 21 | 22 | RETVAL=0 23 | DEBUG=${DEBUG:-} 24 | REDIS_HOST=${REDIS_HOST:-} 25 | REDIS_PORT=${REDIS_PORT:-6379} 26 | REDIS_USER=${REDIS_USER:-} 27 | REDIS_PASSWORD=${REDIS_PASSWORD:-} 28 | REDIS_BUCKET=${REDIS_BUCKET:-} 29 | REDIS_LOCATION=${REDIS_LOCATION:-} 30 | REDIS_FOLDER=${REDIS_FOLDER:-} 31 | REDIS_SNAPSHOT=${REDIS_SNAPSHOT:-} 32 | REDIS_DATA_DIR=${REDIS_DATA_DIR:-/data} 33 | REDIS_RESTORE_SUCCEEDED=${REDIS_RESTORE_SUCCEEDED:-0} 34 | RCLONE_CONFIG_FILE=/etc/rclone/config 35 | 36 | op=$1 37 | shift 38 | 39 | while test $# -gt 0; do 40 | case "$1" in 41 | -h | --help) 42 | show_help 43 | exit 0 44 | ;; 45 | --data-dir*) 46 | export REDIS_DATA_DIR=$(echo $1 | sed -e 's/^[^=]*=//g') 47 | shift 48 | ;; 49 | --host*) 50 | export REDIS_HOST=$(echo $1 | sed -e 's/^[^=]*=//g') 51 | shift 52 | ;; 53 | --user*) 54 | export REDIS_USER=$(echo $1 | sed -e 's/^[^=]*=//g') 55 | shift 56 | ;; 57 | --bucket*) 58 
| export REDIS_BUCKET=$(echo $1 | sed -e 's/^[^=]*=//g') 59 | shift 60 | ;; 61 | --location*) 62 | export REDIS_LOCATION=$(echo $1 | sed -e 's/^[^=]*=//g') 63 | shift 64 | ;; 65 | --folder*) 66 | export REDIS_FOLDER=$(echo $1 | sed -e 's/^[^=]*=//g') 67 | shift 68 | ;; 69 | --snapshot*) 70 | export REDIS_SNAPSHOT=$(echo $1 | sed -e 's/^[^=]*=//g') 71 | shift 72 | ;; 73 | --) 74 | shift 75 | break 76 | ;; 77 | *) 78 | show_help 79 | exit 1 80 | ;; 81 | esac 82 | done 83 | 84 | if [ -n "$DEBUG" ]; then 85 | env | sort | grep REDIS_* 86 | echo "" 87 | fi 88 | 89 | # Wait for redis to start 90 | # ref: http://unix.stackexchange.com/a/5279 91 | #while ! nc -q 1 "${REDIS_HOST}" "${REDIS_PORT}" nodes.conf 113 | pwd 114 | ls -lh "$SOURCE_DIR" 115 | echo "Uploading dump file to the backend......." 116 | echo "From $SOURCE_DIR" 117 | rclone --config "$RCLONE_CONFIG_FILE" copy "$SOURCE_DIR" "$REDIS_LOCATION"/"$REDIS_FOLDER/$REDIS_SNAPSHOT" -v 118 | 119 | echo "Backup successful" 120 | ;; 121 | restore) 122 | echo "Pulling backup file from the backend" 123 | if [ "${REDIS_RESTORE_SUCCEEDED}" == "1" ];then 124 | echo "Has been restored successfully" 125 | exit 0 126 | fi 127 | index=$(echo "${POD_NAME}" | awk -F- '{print $(NF-1)}') 128 | REDIS_SNAPSHOT=${REDIS_SNAPSHOT}-${index} 129 | SOURCE_SNAPSHOT="$REDIS_LOCATION"/"$REDIS_FOLDER/$REDIS_SNAPSHOT" 130 | echo "From $SOURCE_SNAPSHOT" 131 | rclone --config "$RCLONE_CONFIG_FILE" sync "$SOURCE_SNAPSHOT" "$REDIS_DATA_DIR" -v 132 | 133 | echo "Recovery successful" 134 | ;; 135 | *) 136 | (10) 137 | echo $"Unknown op!" 
138 | RETVAL=1 139 | ;; 140 | esac 141 | exit "$RETVAL" 142 | -------------------------------------------------------------------------------- /hack/e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | readonly REPO_PATH=github.com/ucloud/redis-cluster-operator 6 | 7 | if [[ -z ${STORAGECLASSNAME} ]]; then 8 | echo "env STORAGECLASSNAME not set" 9 | exit 1 10 | fi 11 | 12 | if [[ -z ${CLUSTER_DOMAIN} ]]; then 13 | echo "env CLUSTER_DOMAIN not set" 14 | exit 1 15 | fi 16 | 17 | if [[ -z ${GINKGO_SKIP} ]]; then 18 | export GINKGO_SKIP="" 19 | fi 20 | 21 | if [[ -z $TEST_TIMEOUT ]]; then 22 | echo "env TEST_TIMEOUT not set, auto set to 60m" 23 | export TEST_TIMEOUT=60m 24 | fi 25 | 26 | echo "run e2e tests..." 27 | e2ecmd="cd ${GOPATH}/src/${REPO_PATH} && ginkgo -v --mod=vendor --failFast --skip=${GINKGO_SKIP} --timeout=${TEST_TIMEOUT} test/e2e/... -- --rename-command-path=${GOPATH}/src/${REPO_PATH}/test/e2e --rename-command-file=rename.conf" 28 | echo "${e2ecmd}" 29 | eval "${e2ecmd}" 30 | 31 | 32 | -------------------------------------------------------------------------------- /hack/lib/image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # ref: https://github.com/kubedb/mysql/blob/master/hack 4 | 5 | export DOCKER_REGISTRY=${DOCKER_REGISTRY:-operator} 6 | source $(dirname "${BASH_SOURCE}")/lib.sh 7 | 8 | # override this one if you need to change push & pull 9 | docker_push() { 10 | hub_canary 11 | } 12 | 13 | docker_pull() { 14 | hub_pull 15 | } 16 | 17 | source_repo() { 18 | RETVAL=0 19 | 20 | if [ $# -eq 0 ]; then 21 | cmd=${DEFAULT_COMMAND:-build} 22 | $cmd 23 | exit $RETVAL 24 | fi 25 | 26 | case "$1" in 27 | build) 28 | build 29 | ;; 30 | build_binary) 31 | build_binary 32 | ;; 33 | build_docker) 34 | build_docker 35 | ;; 36 | clean) 37 | clean 38 | ;; 39 | push) 40 | docker_push 41 | ;; 42 | pull) 43 | docker_pull 44 | ;;
45 | release) 46 | docker_release 47 | ;; 48 | check) 49 | docker_check 50 | ;; 51 | run) 52 | docker_run 53 | ;; 54 | sh) 55 | docker_sh 56 | ;; 57 | rm) 58 | docker_rm 59 | ;; 60 | rmi) 61 | docker_rmi 62 | ;; 63 | *) 64 | 65 | echo $"Usage: $0 {build|build_binary|build_docker|clean|push|pull|release|check|sh|rm|rmi}" 66 | RETVAL=1 67 | ;; 68 | esac 69 | exit $RETVAL 70 | } 71 | 72 | binary_repo() { 73 | RETVAL=0 74 | 75 | if [ $# -eq 0 ]; then 76 | cmd=${DEFAULT_COMMAND:-build} 77 | $cmd 78 | exit $RETVAL 79 | fi 80 | 81 | case "$1" in 82 | build) 83 | build 84 | ;; 85 | clean) 86 | clean 87 | ;; 88 | push) 89 | docker_push 90 | ;; 91 | pull) 92 | docker_pull 93 | ;; 94 | release) 95 | docker_release 96 | ;; 97 | check) 98 | docker_check 99 | ;; 100 | run) 101 | docker_run 102 | ;; 103 | sh) 104 | docker_sh 105 | ;; 106 | rm) 107 | docker_rm 108 | ;; 109 | rmi) 110 | docker_rmi 111 | ;; 112 | *) 113 | 114 | echo $"Usage: $0 {build|clean|push|pull|release|check|sh|rm|rmi}" 115 | RETVAL=1 116 | ;; 117 | esac 118 | exit $RETVAL 119 | } 120 | -------------------------------------------------------------------------------- /hack/webhook/README.md: -------------------------------------------------------------------------------- 1 | # ValidatingWebhook for distributedredisclusters 2 | 3 | ## Prerequisites 4 | 5 | Kubernetes with the `admissionregistration.k8s.io/v1beta1` API enabled. Verify that by the following command: 6 | ``` 7 | kubectl api-versions | grep admissionregistration.k8s.io/v1beta1 8 | ``` 9 | The result should be: 10 | ``` 11 | admissionregistration.k8s.io/v1beta1 12 | ``` 13 | 14 | In addition, the `MutatingAdmissionWebhook` and `ValidatingAdmissionWebhook` admission controllers should be added and listed in the correct order in the admission-control flag of kube-apiserver. 15 | 16 | ## Deploy 17 | 18 | 1.
Create a signed cert/key pair and store it in a Kubernetes `secret` that will be consumed by operator deployment 19 | ``` 20 | ./create-signed-cert.sh --service drc-admission-webhook --secret drc-webhook-cert --namespace default 21 | ``` 22 | 23 | 2. Patch the `ValidatingWebhookConfiguration` by set `caBundle` with correct value from Kubernetes cluster 24 | ``` 25 | cat validatingwebhook.yaml | \ 26 | patch-ca-bundle.sh > \ 27 | validatingwebhook-ca-bundle.yaml 28 | ``` 29 | 30 | 3. Deploy resources 31 | ``` 32 | kubectl delete -f operator.yml 33 | kubectl create -f operator.yml 34 | kubectl create -f validatingwebhook-ca-bundle.yaml 35 | kubectl create -f service.yaml 36 | ``` -------------------------------------------------------------------------------- /hack/webhook/create-signed-cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | usage() { 6 | cat <> ${tmpdir}/csr.conf 58 | [req] 59 | req_extensions = v3_req 60 | distinguished_name = req_distinguished_name 61 | [req_distinguished_name] 62 | [ v3_req ] 63 | basicConstraints = CA:FALSE 64 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 65 | extendedKeyUsage = serverAuth 66 | subjectAltName = @alt_names 67 | [alt_names] 68 | DNS.1 = ${service} 69 | DNS.2 = ${service}.${namespace} 70 | DNS.3 = ${service}.${namespace}.svc 71 | EOF 72 | 73 | openssl genrsa -out ${tmpdir}/server-key.pem 2048 74 | openssl req -new -key ${tmpdir}/server-key.pem -subj "/CN=${service}.${namespace}.svc" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf 75 | 76 | # clean-up any previously created CSR for our service. Ignore errors if not present. 
77 | kubectl delete csr ${csrName} 2>/dev/null || true 78 | 79 | # create server cert/key CSR and send to k8s API 80 | cat <&2 115 | exit 1 116 | fi 117 | echo ${serverCert} | openssl base64 -d -A -out ${tmpdir}/server-cert.pem 118 | 119 | 120 | # create the secret with CA cert and server cert/key 121 | kubectl create secret generic ${secret} \ 122 | --from-file=tls.key=${tmpdir}/server-key.pem \ 123 | --from-file=tls.crt=${tmpdir}/server-cert.pem \ 124 | --dry-run -o yaml | 125 | kubectl -n ${namespace} apply -f - -------------------------------------------------------------------------------- /hack/webhook/operator.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-cluster-operator 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | name: redis-cluster-operator 10 | template: 11 | metadata: 12 | labels: 13 | name: redis-cluster-operator 14 | spec: 15 | serviceAccountName: redis-cluster-operator 16 | containers: 17 | - name: redis-cluster-operator 18 | image: fishu/redis-cluster-operator:latest 19 | command: 20 | - redis-cluster-operator 21 | args: 22 | - --zap-level=4 23 | - --zap-time-encoding=iso8601 24 | - --ctr-maxconcurrent=10 25 | - --rename-command-path=/etc/redisconf 26 | - --rename-command-file=redis.conf 27 | imagePullPolicy: Always 28 | resources: 29 | limits: 30 | cpu: "2" 31 | memory: 2000Mi 32 | requests: 33 | cpu: 500m 34 | memory: 500Mi 35 | env: 36 | - name: WATCH_NAMESPACE 37 | value: "" 38 | - name: ENABLE_WEBHOOKS 39 | value: "true" 40 | - name: POD_NAME 41 | valueFrom: 42 | fieldRef: 43 | fieldPath: metadata.name 44 | - name: OPERATOR_NAME 45 | value: "redis-cluster-operator" 46 | volumeMounts: 47 | - name: redisconf 48 | mountPath: /etc/redisconf 49 | - name: webhook-certs 50 | mountPath: /etc/webhook/certs 51 | readOnly: true 52 | volumes: 53 | - name: redisconf 54 | configMap: 55 | name: redis-admin 56 | - name: 
webhook-certs 57 | secret: 58 | secretName: drc-webhook-cert 59 | --- 60 | apiVersion: v1 61 | kind: ConfigMap 62 | metadata: 63 | name: redis-admin 64 | data: 65 | redis.conf: |- 66 | rename-command CONFIG lni07z1p 67 | rename-command BGSAVE pp14qluk 68 | rename-command DEBUG 8a4insyv 69 | rename-command SAVE 6on30p6z 70 | rename-command SHUTDOWN dvui0opr 71 | rename-command SLAVEOF xwxvcw36 72 | rename-command BGREWRITEAOF www07fko 73 | rename-command FLUSHDB dfqw3rsf 74 | rename-command FLUSHALL zvsdf2ff -------------------------------------------------------------------------------- /hack/webhook/patch-ca-bundle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROOT=$(cd $(dirname $0)/../../; pwd) 4 | 5 | set -o errexit 6 | set -o nounset 7 | set -o pipefail 8 | 9 | export CA_BUNDLE=$(kubectl config view --raw --minify --flatten -o jsonpath='{.clusters[].cluster.certificate-authority-data}') 10 | 11 | if command -v envsubst >/dev/null 2>&1; then 12 | envsubst 13 | else 14 | sed -e "s|\${CA_BUNDLE}|${CA_BUNDLE}|g" 15 | fi -------------------------------------------------------------------------------- /hack/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: drc-admission-webhook 5 | labels: 6 | app: admission-webhook 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 7443 11 | selector: 12 | name: redis-cluster-operator -------------------------------------------------------------------------------- /hack/webhook/validatingwebhook.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: admissionregistration.k8s.io/v1beta1 2 | kind: ValidatingWebhookConfiguration 3 | metadata: 4 | name: drc-validation-webhook 5 | webhooks: 6 | - name: drc.redis.kun 7 | #failurePolicy: Ignore 8 | failurePolicy: Fail 9 | clientConfig: 10 | service: 11 | name: 
drc-admission-webhook 12 | namespace: default 13 | path: /validate-redis-kun-v1alpha1-distributedrediscluster 14 | caBundle: ${CA_BUNDLE} 15 | rules: 16 | - operations: 17 | - CREATE 18 | - UPDATE 19 | apiGroups: 20 | - redis.kun 21 | apiVersions: 22 | - v1alpha1 23 | resources: 24 | - distributedredisclusters -------------------------------------------------------------------------------- /pkg/apis/addtoscheme_redis_v1alpha1.go: -------------------------------------------------------------------------------- 1 | package apis 2 | 3 | import ( 4 | "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 5 | ) 6 | 7 | func init() { 8 | // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back 9 | AddToSchemes = append(AddToSchemes, v1alpha1.SchemeBuilder.AddToScheme) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/apis/apis.go: -------------------------------------------------------------------------------- 1 | package apis 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/runtime" 5 | ) 6 | 7 | // AddToSchemes may be used to add all resources defined in the project to a Scheme 8 | var AddToSchemes runtime.SchemeBuilder 9 | 10 | // AddToScheme adds all Resources to the Scheme 11 | func AddToScheme(s *runtime.Scheme) error { 12 | return AddToSchemes.AddToScheme(s) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/apis/redis/group.go: -------------------------------------------------------------------------------- 1 | // Package redis contains redis API versions. 2 | // 3 | // This file ensures Go source parsers acknowledge the redis package 4 | // and any child packages. It can be removed if any other Go source files are 5 | // added to this package. 
6 | package redis 7 | -------------------------------------------------------------------------------- /pkg/apis/redis/v1alpha1/constants.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | type StorageType string 4 | 5 | const ( 6 | PersistentClaim StorageType = "persistent-claim" 7 | Ephemeral StorageType = "ephemeral" 8 | ) 9 | 10 | const ( 11 | OperatorName = "redis-cluster-operator" 12 | LabelManagedByKey = "managed-by" 13 | LabelNameKey = "distributed-redis-cluster" 14 | StatefulSetLabel = "statefulSet" 15 | PasswordENV = "REDIS_PASSWORD" 16 | ) 17 | 18 | // RedisRole RedisCluster Node Role type 19 | type RedisRole string 20 | 21 | const ( 22 | // RedisClusterNodeRoleMaster RedisCluster Master node role 23 | RedisClusterNodeRoleMaster RedisRole = "Master" 24 | // RedisClusterNodeRoleSlave RedisCluster Master node role 25 | RedisClusterNodeRoleSlave RedisRole = "Slave" 26 | // RedisClusterNodeRoleNone None node role 27 | RedisClusterNodeRoleNone RedisRole = "None" 28 | ) 29 | 30 | // ClusterStatus Redis Cluster status 31 | type ClusterStatus string 32 | 33 | const ( 34 | // ClusterStatusOK ClusterStatus OK 35 | ClusterStatusOK ClusterStatus = "Healthy" 36 | // ClusterStatusKO ClusterStatus KO 37 | ClusterStatusKO ClusterStatus = "Failed" 38 | // ClusterStatusCreating ClusterStatus Creating 39 | ClusterStatusCreating = "Creating" 40 | // ClusterStatusScaling ClusterStatus Scaling 41 | ClusterStatusScaling ClusterStatus = "Scaling" 42 | // ClusterStatusCalculatingRebalancing ClusterStatus Rebalancing 43 | ClusterStatusCalculatingRebalancing ClusterStatus = "Calculating Rebalancing" 44 | // ClusterStatusRebalancing ClusterStatus Rebalancing 45 | ClusterStatusRebalancing ClusterStatus = "Rebalancing" 46 | // ClusterStatusRollingUpdate ClusterStatus RollingUpdate 47 | ClusterStatusRollingUpdate ClusterStatus = "RollingUpdate" 48 | // ClusterStatusResetPassword ClusterStatus ResetPassword 49 | 
ClusterStatusResetPassword ClusterStatus = "ResetPassword" 50 | ) 51 | 52 | // NodesPlacementInfo Redis Nodes placement mode information 53 | type NodesPlacementInfo string 54 | 55 | const ( 56 | // NodesPlacementInfoBestEffort the cluster nodes placement is in best effort, 57 | // it means you can have 2 masters (or more) on the same VM. 58 | NodesPlacementInfoBestEffort NodesPlacementInfo = "BestEffort" 59 | // NodesPlacementInfoOptimal the cluster nodes placement is optimal, 60 | // it means on master by VM 61 | NodesPlacementInfoOptimal NodesPlacementInfo = "Optimal" 62 | ) 63 | 64 | type RestorePhase string 65 | 66 | const ( 67 | // RestorePhaseRunning used for Restore that are currently running. 68 | RestorePhaseRunning RestorePhase = "Running" 69 | // RestorePhaseRestart used for Restore that are restart master nodes. 70 | RestorePhaseRestart RestorePhase = "Restart" 71 | // RestorePhaseSucceeded used for Restore that are Succeeded. 72 | RestorePhaseSucceeded RestorePhase = "Succeeded" 73 | ) 74 | 75 | const ( 76 | DatabaseNamePrefix = "redis" 77 | 78 | GenericKey = "redis.kun" 79 | 80 | LabelClusterName = GenericKey + "/name" 81 | 82 | BackupKey = ResourceSingularBackup + "." 
+ GenericKey 83 | LabelBackupStatus = BackupKey + "/status" 84 | 85 | AnnotationJobType = GenericKey + "/job-type" 86 | 87 | JobTypeBackup = "backup" 88 | JobTypeRestore = "restore" 89 | 90 | PrometheusExporterPortNumber = 9100 91 | PrometheusExporterTelemetryPath = "/metrics" 92 | 93 | BackupDumpDir = "/data" 94 | UtilVolumeName = "util-volume" 95 | ) 96 | -------------------------------------------------------------------------------- /pkg/apis/redis/v1alpha1/default.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | "fmt" 5 | "path/filepath" 6 | 7 | "github.com/go-logr/logr" 8 | "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/api/resource" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime/schema" 12 | ) 13 | 14 | const ( 15 | minMasterSize = 3 16 | minClusterReplicas = 1 17 | defaultRedisImage = "redis:5.0.4-alpine" 18 | defaultMonitorImage = "oliver006/redis_exporter:latest" 19 | ) 20 | 21 | func (in *DistributedRedisCluster) DefaultSpec(log logr.Logger) bool { 22 | update := false 23 | if in.Spec.MasterSize < minMasterSize { 24 | in.Spec.MasterSize = minMasterSize 25 | update = true 26 | } 27 | 28 | if in.Spec.Image == "" { 29 | in.Spec.Image = defaultRedisImage 30 | update = true 31 | } 32 | 33 | if in.Spec.ServiceName == "" { 34 | in.Spec.ServiceName = in.Name 35 | update = true 36 | } 37 | 38 | if in.Spec.Resources == nil || in.Spec.Resources.Size() == 0 { 39 | in.Spec.Resources = defaultResource() 40 | update = true 41 | } 42 | 43 | mon := in.Spec.Monitor 44 | if mon != nil { 45 | if mon.Image == "" { 46 | mon.Image = defaultMonitorImage 47 | update = true 48 | } 49 | 50 | if mon.Prometheus == nil { 51 | mon.Prometheus = &PrometheusSpec{} 52 | update = true 53 | } 54 | if mon.Prometheus.Port == 0 { 55 | mon.Prometheus.Port = PrometheusExporterPortNumber 56 | update = true 57 | } 58 | if in.Spec.Annotations == nil { 59 | in.Spec.Annotations = 
make(map[string]string) 60 | update = true 61 | } 62 | 63 | in.Spec.Annotations["prometheus.io/scrape"] = "true" 64 | in.Spec.Annotations["prometheus.io/path"] = PrometheusExporterTelemetryPath 65 | in.Spec.Annotations["prometheus.io/port"] = fmt.Sprintf("%d", mon.Prometheus.Port) 66 | } 67 | return update 68 | } 69 | 70 | func (in *DistributedRedisCluster) IsRestoreFromBackup() bool { 71 | initSpec := in.Spec.Init 72 | if initSpec != nil && initSpec.BackupSource != nil { 73 | return true 74 | } 75 | return false 76 | } 77 | 78 | func (in *DistributedRedisCluster) IsRestored() bool { 79 | return in.Status.Restore.Phase == RestorePhaseSucceeded 80 | } 81 | 82 | func (in *DistributedRedisCluster) ShouldInitRestorePhase() bool { 83 | return in.Status.Restore.Phase == "" 84 | } 85 | 86 | func (in *DistributedRedisCluster) IsRestoreRunning() bool { 87 | return in.Status.Restore.Phase == RestorePhaseRunning 88 | } 89 | 90 | func (in *DistributedRedisCluster) IsRestoreRestarting() bool { 91 | return in.Status.Restore.Phase == RestorePhaseRestart 92 | } 93 | 94 | func defaultResource() *v1.ResourceRequirements { 95 | return &v1.ResourceRequirements{ 96 | Requests: v1.ResourceList{ 97 | v1.ResourceCPU: resource.MustParse("200m"), 98 | v1.ResourceMemory: resource.MustParse("2Gi"), 99 | }, 100 | Limits: v1.ResourceList{ 101 | v1.ResourceCPU: resource.MustParse("1000m"), 102 | v1.ResourceMemory: resource.MustParse("4Gi"), 103 | }, 104 | } 105 | } 106 | 107 | func DefaultOwnerReferences(cluster *DistributedRedisCluster) []metav1.OwnerReference { 108 | return []metav1.OwnerReference{ 109 | *metav1.NewControllerRef(cluster, schema.GroupVersionKind{ 110 | Group: SchemeGroupVersion.Group, 111 | Version: SchemeGroupVersion.Version, 112 | Kind: DistributedRedisClusterKind, 113 | }), 114 | } 115 | } 116 | 117 | func (in *RedisClusterBackup) Validate() error { 118 | clusterName := in.Spec.RedisClusterName 119 | if clusterName == "" { 120 | return fmt.Errorf("bakcup [RedisClusterName] 
is missing") 121 | } 122 | // BucketName can't be empty 123 | if in.Spec.S3 == nil && in.Spec.GCS == nil && in.Spec.Azure == nil && in.Spec.Swift == nil && in.Spec.Local == nil { 124 | return fmt.Errorf("no storage provider is configured") 125 | } 126 | 127 | if in.Spec.Azure != nil || in.Spec.Swift != nil { 128 | if in.Spec.StorageSecretName == "" { 129 | return fmt.Errorf("bakcup [SecretName] is missing") 130 | } 131 | } 132 | return nil 133 | } 134 | 135 | func (in *RedisClusterBackup) RemotePath() (string, error) { 136 | spec := in.Spec.Backend 137 | timePrefix := in.Status.StartTime.Format("20060102150405") 138 | if spec.S3 != nil { 139 | return filepath.Join(spec.S3.Prefix, DatabaseNamePrefix, in.Namespace, in.Spec.RedisClusterName, timePrefix), nil 140 | } else if spec.GCS != nil { 141 | return filepath.Join(spec.GCS.Prefix, DatabaseNamePrefix, in.Namespace, in.Spec.RedisClusterName, timePrefix), nil 142 | } else if spec.Azure != nil { 143 | return filepath.Join(spec.Azure.Prefix, DatabaseNamePrefix, in.Namespace, in.Spec.RedisClusterName, timePrefix), nil 144 | } else if spec.Local != nil { 145 | return filepath.Join(DatabaseNamePrefix, in.Namespace, in.Spec.RedisClusterName, timePrefix), nil 146 | } else if spec.Swift != nil { 147 | return filepath.Join(spec.Swift.Prefix, DatabaseNamePrefix, in.Namespace, in.Spec.RedisClusterName, timePrefix), nil 148 | } 149 | return "", fmt.Errorf("no storage provider is configured") 150 | } 151 | 152 | func (in *RedisClusterBackup) RCloneSecretName() string { 153 | return fmt.Sprintf("rcloneconfig-%v", in.Name) 154 | } 155 | 156 | func (in *RedisClusterBackup) JobName() string { 157 | return fmt.Sprintf("redisbackup-%v", in.Name) 158 | } 159 | 160 | func (in *RedisClusterBackup) IsRefLocalPVC() bool { 161 | return in.Spec.Local != nil && in.Spec.Local.PersistentVolumeClaim != nil 162 | } 163 | -------------------------------------------------------------------------------- 
/pkg/apis/redis/v1alpha1/distributedrediscluster_webhook.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "strings" 7 | 8 | "github.com/go-logr/logr" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | utilvalidation "k8s.io/apimachinery/pkg/util/validation" 11 | "k8s.io/apimachinery/pkg/util/validation/field" 12 | "k8s.io/kubernetes/pkg/apis/core/v1/validation" 13 | ctrl "sigs.k8s.io/controller-runtime" 14 | logf "sigs.k8s.io/controller-runtime/pkg/log" 15 | "sigs.k8s.io/controller-runtime/pkg/webhook" 16 | 17 | "github.com/ucloud/redis-cluster-operator/pkg/utils" 18 | ) 19 | 20 | var log = logf.Log.WithName("drc-resource") 21 | 22 | var _ webhook.Validator = &DistributedRedisCluster{} 23 | 24 | func (in *DistributedRedisCluster) ValidateCreate() error { 25 | log := log.WithValues("namespace", in.Namespace, "name", in.Name) 26 | log.Info("ValidateCreate") 27 | if errs := utilvalidation.IsDNS1035Label(in.Spec.ServiceName); len(in.Spec.ServiceName) > 0 && len(errs) > 0 { 28 | return fmt.Errorf("the custom service is invalid: invalid value: %s, %s", in.Spec.ServiceName, strings.Join(errs, ",")) 29 | } 30 | 31 | if in.Spec.Resources != nil { 32 | if errs := validation.ValidateResourceRequirements(in.Spec.Resources, field.NewPath("resources")); len(errs) > 0 { 33 | return errs.ToAggregate() 34 | } 35 | } 36 | 37 | return nil 38 | } 39 | 40 | func (in *DistributedRedisCluster) ValidateUpdate(old runtime.Object) error { 41 | log := log.WithValues("namespace", in.Namespace, "name", in.Name) 42 | log.Info("ValidateUpdate") 43 | 44 | oldObj, ok := old.(*DistributedRedisCluster) 45 | if !ok { 46 | err := fmt.Errorf("invalid obj type") 47 | log.Error(err, "can not reflect type") 48 | return err 49 | } 50 | 51 | if errs := utilvalidation.IsDNS1035Label(in.Spec.ServiceName); len(in.Spec.ServiceName) > 0 && len(errs) > 0 { 52 | return fmt.Errorf("the custom service is invalid: invalid 
value: %s, %s", in.Spec.ServiceName, strings.Join(errs, ",")) 53 | } 54 | 55 | if in.Spec.Resources != nil { 56 | if errs := validation.ValidateResourceRequirements(in.Spec.Resources, field.NewPath("resources")); len(errs) > 0 { 57 | return errs.ToAggregate() 58 | } 59 | } 60 | 61 | if oldObj.Status.Status == "" { 62 | return nil 63 | } 64 | if compareObj(in, oldObj, log) && oldObj.Status.Status != ClusterStatusOK { 65 | return fmt.Errorf("redis cluster status: [%s], wait for the status to become %s before operating", oldObj.Status.Status, ClusterStatusOK) 66 | } 67 | 68 | return nil 69 | } 70 | 71 | func compareObj(new, old *DistributedRedisCluster, log logr.Logger) bool { 72 | if utils.CompareInt32("MasterSize", new.Spec.MasterSize, old.Spec.MasterSize, log) { 73 | return true 74 | } 75 | 76 | if utils.CompareStringValue("Image", new.Spec.Image, old.Spec.Image, log) { 77 | return true 78 | } 79 | 80 | if !reflect.DeepEqual(new.Spec.Resources, old.Spec.Resources) { 81 | log.Info("compare resource", "new", new.Spec.Resources, "old", old.Spec.Resources) 82 | return true 83 | } 84 | 85 | if !reflect.DeepEqual(new.Spec.PasswordSecret, old.Spec.PasswordSecret) { 86 | log.Info("compare password", "new", new.Spec.PasswordSecret, "old", old.Spec.PasswordSecret) 87 | return true 88 | } 89 | 90 | return false 91 | } 92 | 93 | func (in *DistributedRedisCluster) ValidateDelete() error { 94 | log := log.WithValues("namespace", in.Namespace, "name", in.Name) 95 | log.Info("ValidateDelete") 96 | return nil 97 | } 98 | 99 | func (in *DistributedRedisCluster) SetupWebhookWithManager(mgr ctrl.Manager) error { 100 | return ctrl.NewWebhookManagedBy(mgr). 101 | For(in). 
102 | Complete() 103 | } 104 | -------------------------------------------------------------------------------- /pkg/apis/redis/v1alpha1/doc.go: -------------------------------------------------------------------------------- 1 | // Package v1alpha1 contains API Schema definitions for the redis v1alpha1 API group 2 | // +k8s:deepcopy-gen=package,register 3 | // +groupName=redis.kun 4 | package v1alpha1 5 | -------------------------------------------------------------------------------- /pkg/apis/redis/v1alpha1/register.go: -------------------------------------------------------------------------------- 1 | // NOTE: Boilerplate only. Ignore this file. 2 | 3 | // Package v1alpha1 contains API Schema definitions for the redis v1alpha1 API group 4 | // +k8s:deepcopy-gen=package,register 5 | // +groupName=redis.kun 6 | package v1alpha1 7 | 8 | import ( 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "sigs.k8s.io/controller-runtime/pkg/scheme" 11 | ) 12 | 13 | const ( 14 | DistributedRedisClusterKind = "DistributedRedisCluster" 15 | RedisClusterBackupKind = "RedisClusterBackup" 16 | ) 17 | 18 | var ( 19 | // SchemeGroupVersion is group version used to register these objects 20 | SchemeGroupVersion = schema.GroupVersion{Group: "redis.kun", Version: "v1alpha1"} 21 | 22 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 23 | SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} 24 | ) 25 | -------------------------------------------------------------------------------- /pkg/config/redis.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | import ( 4 | "path" 5 | 6 | "github.com/spf13/pflag" 7 | ) 8 | 9 | const ( 10 | // DefaultRedisTimeout default redis timeout (ms) 11 | DefaultRedisTimeout = 2000 12 | //DefaultClusterNodeTimeout default cluster node timeout (ms) 13 | //The maximum amount of time a Redis Cluster node can be unavailable, without it being considered as failing 14 | 
DefaultClusterNodeTimeout = 2000 15 | // RedisRenameCommandsDefaultPath default path to volume storing rename commands 16 | RedisRenameCommandsDefaultPath = "/etc/secret-volume" 17 | // RedisRenameCommandsDefaultFile default file name containing rename commands 18 | RedisRenameCommandsDefaultFile = "" 19 | // RedisConfigFileDefault default config file path 20 | RedisConfigFileDefault = "/redis-conf/redis.conf" 21 | // RedisServerBinDefault default binary name 22 | RedisServerBinDefault = "redis-server" 23 | // RedisServerPortDefault default redis port 24 | RedisServerPortDefault = "6379" 25 | // RedisMaxMemoryDefault default redis max memory 26 | RedisMaxMemoryDefault = 0 27 | // RedisMaxMemoryPolicyDefault default redis max memory evition policy 28 | RedisMaxMemoryPolicyDefault = "noeviction" 29 | ) 30 | 31 | //var redisFlagSet *pflag.FlagSet 32 | // 33 | //func init() { 34 | // redisFlagSet = pflag.NewFlagSet("redis", pflag.ExitOnError) 35 | //} 36 | 37 | var redisConf *Redis 38 | 39 | func init() { 40 | redisConf = &Redis{} 41 | } 42 | 43 | func RedisConf() *Redis { 44 | return redisConf 45 | } 46 | 47 | // Redis used to store all Redis configuration information 48 | type Redis struct { 49 | DialTimeout int 50 | ClusterNodeTimeout int 51 | ConfigFileName string 52 | RenameCommandsPath string 53 | RenameCommandsFile string 54 | HTTPServerAddr string 55 | ServerBin string 56 | ServerPort string 57 | ServerIP string 58 | MaxMemory uint32 59 | MaxMemoryPolicy string 60 | ConfigFiles []string 61 | } 62 | 63 | // AddFlags use to add the Redis Config flags to the command line 64 | func (r *Redis) AddFlags(fs *pflag.FlagSet) { 65 | fs.IntVar(&r.DialTimeout, "rdt", DefaultRedisTimeout, "redis dial timeout (ms)") 66 | fs.IntVar(&r.ClusterNodeTimeout, "cluster-node-timeout", DefaultClusterNodeTimeout, "redis node timeout (ms)") 67 | fs.StringVar(&r.ConfigFileName, "c", RedisConfigFileDefault, "redis config file path") 68 | fs.StringVar(&r.RenameCommandsPath, 
"rename-command-path", RedisRenameCommandsDefaultPath, "Path to the folder where rename-commands option for redis are available") 69 | fs.StringVar(&r.RenameCommandsFile, "rename-command-file", RedisRenameCommandsDefaultFile, "Name of the file where rename-commands option for redis are available, disabled if empty") 70 | fs.Uint32Var(&r.MaxMemory, "max-memory", RedisMaxMemoryDefault, "redis max memory") 71 | fs.StringVar(&r.MaxMemoryPolicy, "max-memory-policy", RedisMaxMemoryPolicyDefault, "redis max memory evition policy") 72 | fs.StringVar(&r.ServerBin, "bin", RedisServerBinDefault, "redis server binary file name") 73 | fs.StringVar(&r.ServerPort, "port", RedisServerPortDefault, "redis server listen port") 74 | fs.StringVar(&r.ServerIP, "ip", "", "redis server listen ip") 75 | fs.StringArrayVar(&r.ConfigFiles, "config-file", []string{}, "Location of redis configuration file that will be include in the ") 76 | } 77 | 78 | // GetRenameCommandsFile return the path to the rename command file, or empty string if not define 79 | func (r *Redis) GetRenameCommandsFile() string { 80 | if r.RenameCommandsFile == "" { 81 | return "" 82 | } 83 | return path.Join(r.RenameCommandsPath, r.RenameCommandsFile) 84 | } 85 | -------------------------------------------------------------------------------- /pkg/controller/add_distributedrediscluster.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "github.com/ucloud/redis-cluster-operator/pkg/controller/distributedrediscluster" 5 | ) 6 | 7 | func init() { 8 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
9 | AddToManagerFuncs = append(AddToManagerFuncs, distributedrediscluster.Add) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/controller/add_redisclusterbackup.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "github.com/ucloud/redis-cluster-operator/pkg/controller/redisclusterbackup" 5 | ) 6 | 7 | func init() { 8 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 9 | AddToManagerFuncs = append(AddToManagerFuncs, redisclusterbackup.Add) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/controller/clustering/placement_v2.go: -------------------------------------------------------------------------------- 1 | package clustering 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/go-logr/logr" 7 | 8 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 9 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 10 | "github.com/ucloud/redis-cluster-operator/pkg/resources/statefulsets" 11 | ) 12 | 13 | type Ctx struct { 14 | log logr.Logger 15 | expectedMasterNum int 16 | clusterName string 17 | cluster *redisutil.Cluster 18 | nodes map[string]redisutil.Nodes 19 | currentMasters redisutil.Nodes 20 | newMastersBySts map[string]*redisutil.Node 21 | slavesByMaster map[string]redisutil.Nodes 22 | bestEffort bool 23 | } 24 | 25 | func NewCtx(cluster *redisutil.Cluster, nodes redisutil.Nodes, masterNum int32, clusterName string, log logr.Logger) *Ctx { 26 | ctx := &Ctx{ 27 | log: log, 28 | expectedMasterNum: int(masterNum), 29 | clusterName: clusterName, 30 | cluster: cluster, 31 | slavesByMaster: make(map[string]redisutil.Nodes), 32 | newMastersBySts: make(map[string]*redisutil.Node), 33 | } 34 | ctx.nodes = ctx.sortRedisNodeByStatefulSet(nodes) 35 | return ctx 36 | } 37 | 38 | func (c *Ctx) sortRedisNodeByStatefulSet(nodes 
redisutil.Nodes) map[string]redisutil.Nodes { 39 | nodesByStatefulSet := make(map[string]redisutil.Nodes) 40 | 41 | for _, rNode := range nodes { 42 | cNode, err := c.cluster.GetNodeByID(rNode.ID) 43 | if err != nil { 44 | c.log.Error(err, "[sortRedisNodeByStatefulSet] unable fo found the Cluster.Node with redis", "ID", rNode.ID) 45 | continue // if not then next line with cNode.Pod will cause a panic since cNode is nil 46 | } 47 | ssName := unknownVMName 48 | if cNode.StatefulSet != "" { 49 | ssName = cNode.StatefulSet 50 | } 51 | if _, ok := nodesByStatefulSet[ssName]; !ok { 52 | nodesByStatefulSet[ssName] = redisutil.Nodes{} 53 | } 54 | nodesByStatefulSet[ssName] = append(nodesByStatefulSet[ssName], rNode) 55 | if (rNode.GetRole() == redisv1alpha1.RedisClusterNodeRoleMaster) && rNode.TotalSlots() > 0 { 56 | c.currentMasters = append(c.currentMasters, rNode) 57 | } 58 | } 59 | 60 | return nodesByStatefulSet 61 | } 62 | 63 | func (c *Ctx) DispatchMasters() error { 64 | for i := 0; i < c.expectedMasterNum; i++ { 65 | stsName := statefulsets.ClusterStatefulSetName(c.clusterName, i) 66 | nodes, ok := c.nodes[stsName] 67 | if !ok { 68 | return fmt.Errorf("missing statefulset %s", stsName) 69 | } 70 | currentMasterNodes := nodes.FilterByFunc(redisutil.IsMasterWithSlot) 71 | if len(currentMasterNodes) == 0 { 72 | master := c.PlaceMasters(stsName) 73 | c.newMastersBySts[stsName] = master 74 | } else if len(currentMasterNodes) == 1 { 75 | c.newMastersBySts[stsName] = currentMasterNodes[0] 76 | } else if len(currentMasterNodes) > 1 { 77 | c.log.Error(fmt.Errorf("split brain"), "fix manually", "statefulSet", stsName, "masters", currentMasterNodes) 78 | return fmt.Errorf("split brain: %s", stsName) 79 | } 80 | } 81 | 82 | return nil 83 | } 84 | 85 | func (c *Ctx) PlaceMasters(ssName string) *redisutil.Node { 86 | var allMasters redisutil.Nodes 87 | allMasters = append(allMasters, c.currentMasters...) 
88 | for _, master := range c.newMastersBySts { 89 | allMasters = append(allMasters, master) 90 | } 91 | nodes := c.nodes[ssName] 92 | for _, cNode := range nodes { 93 | _, err := allMasters.GetNodesByFunc(func(node *redisutil.Node) bool { 94 | if node.NodeName == cNode.NodeName { 95 | return true 96 | } 97 | return false 98 | }) 99 | if err != nil { 100 | return cNode 101 | } 102 | } 103 | c.bestEffort = true 104 | c.log.Info("the pod are not spread enough on VMs to have only one master by VM", "select", nodes[0].IP) 105 | return nodes[0] 106 | } 107 | 108 | func (c *Ctx) PlaceSlaves() error { 109 | c.bestEffort = true 110 | for ssName, nodes := range c.nodes { 111 | master := c.newMastersBySts[ssName] 112 | for _, node := range nodes { 113 | if node.IP == master.IP { 114 | continue 115 | } 116 | if node.NodeName != master.NodeName { 117 | c.bestEffort = false 118 | } 119 | if node.GetRole() == redisv1alpha1.RedisClusterNodeRoleSlave { 120 | if node.MasterReferent != master.ID { 121 | c.log.Error(nil, "master referent conflict", "node ip", node.IP, 122 | "current masterID", node.MasterReferent, "expect masterID", master.ID, "master IP", master.IP) 123 | c.slavesByMaster[master.ID] = append(c.slavesByMaster[master.ID], node) 124 | } 125 | continue 126 | } 127 | c.slavesByMaster[master.ID] = append(c.slavesByMaster[master.ID], node) 128 | } 129 | } 130 | return nil 131 | } 132 | 133 | func (c *Ctx) GetCurrentMasters() redisutil.Nodes { 134 | return c.currentMasters 135 | } 136 | 137 | func (c *Ctx) GetNewMasters() redisutil.Nodes { 138 | var nodes redisutil.Nodes 139 | for _, node := range c.newMastersBySts { 140 | nodes = append(nodes, node) 141 | } 142 | return nodes 143 | } 144 | 145 | func (c *Ctx) GetSlaves() map[string]redisutil.Nodes { 146 | return c.slavesByMaster 147 | } 148 | 149 | func (c *Ctx) GetStatefulsetNodes() map[string]redisutil.Nodes { 150 | return c.nodes 151 | } 152 | 
-------------------------------------------------------------------------------- /pkg/controller/clustering/rebalance.go: -------------------------------------------------------------------------------- 1 | package clustering 2 | 3 | import ( 4 | "fmt" 5 | "math" 6 | 7 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 8 | "github.com/ucloud/redis-cluster-operator/pkg/utils" 9 | ) 10 | 11 | // RebalancedCluster rebalanced a redis cluster. 12 | func (c *Ctx) RebalancedCluster(admin redisutil.IAdmin, newMasterNodes redisutil.Nodes) error { 13 | nbNode := len(newMasterNodes) 14 | for _, node := range newMasterNodes { 15 | expected := int(float64(admin.GetHashMaxSlot()+1) / float64(nbNode)) 16 | node.SetBalance(len(node.Slots) - expected) 17 | } 18 | 19 | totalBalance := 0 20 | for _, node := range newMasterNodes { 21 | totalBalance += node.Balance() 22 | } 23 | 24 | for totalBalance > 0 { 25 | for _, node := range newMasterNodes { 26 | if node.Balance() < 0 && totalBalance > 0 { 27 | b := node.Balance() - 1 28 | node.SetBalance(b) 29 | totalBalance -= 1 30 | } 31 | } 32 | } 33 | 34 | // Sort nodes by their slots balance. 
35 | sn := newMasterNodes.SortByFunc(func(a, b *redisutil.Node) bool { return a.Balance() < b.Balance() }) 36 | if log.V(4).Enabled() { 37 | for _, node := range sn { 38 | log.Info("debug rebalanced master", "node", node.IPPort(), "balance", node.Balance()) 39 | } 40 | } 41 | 42 | log.Info(">>> rebalancing", "nodeNum", nbNode) 43 | 44 | dstIdx := 0 45 | srcIdx := len(sn) - 1 46 | 47 | for dstIdx < srcIdx { 48 | dst := sn[dstIdx] 49 | src := sn[srcIdx] 50 | 51 | var numSlots float64 52 | if math.Abs(float64(dst.Balance())) < math.Abs(float64(src.Balance())) { 53 | numSlots = math.Abs(float64(dst.Balance())) 54 | } else { 55 | numSlots = math.Abs(float64(src.Balance())) 56 | } 57 | 58 | if numSlots > 0 { 59 | log.Info(fmt.Sprintf("Moving %f slots from %s to %s", numSlots, src.IPPort(), dst.IPPort())) 60 | srcs := redisutil.Nodes{src} 61 | reshardTable := computeReshardTable(srcs, int(numSlots)) 62 | if len(reshardTable) != int(numSlots) { 63 | log.Error(nil, "*** Assertion failed: Reshard table != number of slots", "table", len(reshardTable), "slots", numSlots) 64 | } 65 | for _, e := range reshardTable { 66 | if err := c.moveSlot(e, dst, admin); err != nil { 67 | return err 68 | } 69 | } 70 | } 71 | 72 | // Update nodes balance. 73 | log.V(4).Info("balance", "dst", dst.Balance(), "src", src.Balance(), "slots", numSlots) 74 | dst.SetBalance(dst.Balance() + int(numSlots)) 75 | src.SetBalance(src.Balance() - int(numSlots)) 76 | if dst.Balance() == 0 { 77 | dstIdx += 1 78 | } 79 | if src.Balance() == 0 { 80 | srcIdx -= 1 81 | } 82 | } 83 | 84 | return nil 85 | } 86 | 87 | type MovedNode struct { 88 | Source *redisutil.Node 89 | Slot redisutil.Slot 90 | } 91 | 92 | // computeReshardTable Given a list of source nodes return a "resharding plan" 93 | // with what slots to move in order to move "numslots" slots to another instance. 
94 | func computeReshardTable(src redisutil.Nodes, numSlots int) []*MovedNode { 95 | var moved []*MovedNode 96 | 97 | sources := src.SortByFunc(func(a, b *redisutil.Node) bool { return a.TotalSlots() < b.TotalSlots() }) 98 | sourceTotSlots := 0 99 | for _, node := range sources { 100 | sourceTotSlots += node.TotalSlots() 101 | } 102 | for idx, node := range sources { 103 | n := float64(numSlots) / float64(sourceTotSlots) * float64(node.TotalSlots()) 104 | 105 | if idx == 0 { 106 | n = math.Ceil(n) 107 | } else { 108 | n = math.Floor(n) 109 | } 110 | 111 | keys := node.Slots 112 | 113 | for i := 0; i < int(n); i++ { 114 | if len(moved) < numSlots { 115 | mnode := &MovedNode{ 116 | Source: node, 117 | Slot: keys[i], 118 | } 119 | moved = append(moved, mnode) 120 | } 121 | } 122 | } 123 | return moved 124 | } 125 | 126 | func (c *Ctx) moveSlot(source *MovedNode, target *redisutil.Node, admin redisutil.IAdmin) error { 127 | if err := admin.SetSlot(target.IPPort(), "IMPORTING", source.Slot, target.ID); err != nil { 128 | return err 129 | } 130 | if err := admin.SetSlot(source.Source.IPPort(), "MIGRATING", source.Slot, source.Source.ID); err != nil { 131 | return err 132 | } 133 | if _, err := admin.MigrateKeysInSlot(source.Source.IPPort(), target, source.Slot, 10, 30000, true); err != nil { 134 | return err 135 | } 136 | if err := admin.SetSlot(target.IPPort(), "NODE", source.Slot, target.ID); err != nil { 137 | c.log.Error(err, "SET NODE", "node", target.IPPort()) 138 | } 139 | if err := admin.SetSlot(source.Source.IPPort(), "NODE", source.Slot, target.ID); err != nil { 140 | c.log.Error(err, "SET NODE", "node", source.Source.IPPort()) 141 | } 142 | source.Source.Slots = redisutil.RemoveSlot(source.Source.Slots, source.Slot) 143 | return nil 144 | } 145 | 146 | func (c *Ctx) AllocSlots(admin redisutil.IAdmin, newMasterNodes redisutil.Nodes) error { 147 | mastersNum := len(newMasterNodes) 148 | clusterHashSlots := int(admin.GetHashMaxSlot() + 1) 149 | slotsPerNode := 
float64(clusterHashSlots) / float64(mastersNum) 150 | first := 0 151 | cursor := 0.0 152 | for index, node := range newMasterNodes { 153 | last := utils.Round(cursor + slotsPerNode - 1) 154 | if last > clusterHashSlots || index == mastersNum-1 { 155 | last = clusterHashSlots - 1 156 | } 157 | 158 | if last < first { 159 | last = first 160 | } 161 | 162 | node.Slots = redisutil.BuildSlotSlice(redisutil.Slot(first), redisutil.Slot(last)) 163 | first = last + 1 164 | cursor += slotsPerNode 165 | if err := admin.AddSlots(node.IPPort(), node.Slots); err != nil { 166 | return err 167 | } 168 | } 169 | return nil 170 | } 171 | -------------------------------------------------------------------------------- /pkg/controller/clustering/rebalance_test.go: -------------------------------------------------------------------------------- 1 | package clustering 2 | 3 | import ( 4 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 5 | "testing" 6 | ) 7 | 8 | func Test_computeReshardTable(t *testing.T) { 9 | type args struct { 10 | src redisutil.Nodes 11 | numSlots int 12 | } 13 | tests := []struct { 14 | name string 15 | args args 16 | want int 17 | }{ 18 | { 19 | name: "", 20 | args: args{ 21 | src: redisutil.Nodes{&redisutil.Node{ 22 | ID: "node1", 23 | IP: "10.1.1.1", 24 | Port: "6379", 25 | Role: "master", 26 | Slots: redisutil.BuildSlotSlice(5461, 10922), 27 | }}, 28 | numSlots: 1366, 29 | }, 30 | want: 1366, 31 | }, 32 | } 33 | for _, tt := range tests { 34 | t.Run(tt.name, func(t *testing.T) { 35 | if got := computeReshardTable(tt.args.src, tt.args.numSlots); len(got) != tt.want { 36 | t.Errorf("computeReshardTable() = %v, want %v", len(got), tt.want) 37 | } 38 | }) 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /pkg/controller/clustering/roles.go: -------------------------------------------------------------------------------- 1 | package clustering 2 | 3 | import ( 4 | "fmt" 5 | 6 | 
"github.com/ucloud/redis-cluster-operator/pkg/redisutil" 7 | ) 8 | 9 | // AttachingSlavesToMaster used to attach slaves to there masters 10 | func (c *Ctx) AttachingSlavesToMaster(admin redisutil.IAdmin) error { 11 | var globalErr error 12 | for masterID, slaves := range c.slavesByMaster { 13 | masterNode, err := c.cluster.GetNodeByID(masterID) 14 | if err != nil { 15 | c.log.Error(err, fmt.Sprintf("unable fo found the Cluster.Node with redis ID:%s", masterID)) 16 | continue 17 | } 18 | for _, slave := range slaves { 19 | c.log.Info(fmt.Sprintf("attaching node %s to master %s", slave.ID, masterID)) 20 | 21 | err := admin.AttachSlaveToMaster(slave, masterNode.ID) 22 | if err != nil { 23 | c.log.Error(err, fmt.Sprintf("attaching node %s to master %s", slave.ID, masterID)) 24 | globalErr = err 25 | } 26 | } 27 | } 28 | return globalErr 29 | } 30 | -------------------------------------------------------------------------------- /pkg/controller/controller.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "sigs.k8s.io/controller-runtime/pkg/manager" 5 | ) 6 | 7 | // AddToManagerFuncs is a list of functions to add all Controllers to the Manager 8 | var AddToManagerFuncs []func(manager.Manager) error 9 | 10 | // AddToManager adds all Controllers to the Manager 11 | func AddToManager(m manager.Manager) error { 12 | for _, f := range AddToManagerFuncs { 13 | if err := f(m); err != nil { 14 | return err 15 | } 16 | } 17 | return nil 18 | } 19 | -------------------------------------------------------------------------------- /pkg/controller/distributedrediscluster/errors.go: -------------------------------------------------------------------------------- 1 | package distributedrediscluster 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/pkg/errors" 7 | ) 8 | 9 | // ErrorType is the type of an error 10 | type ErrorType uint 11 | 12 | const ( 13 | // NoType error 14 | NoType ErrorType = iota 15 | // 
Requeue error 16 | Requeue 17 | // Kubernetes error 18 | Kubernetes 19 | // Redis error 20 | Redis 21 | // Cluster 22 | Cluster 23 | // StopRetry stop retry error 24 | StopRetry 25 | ) 26 | 27 | type customError struct { 28 | errorType ErrorType 29 | originalError error 30 | } 31 | 32 | // New creates a new customError 33 | func (errorType ErrorType) New(msg string) error { 34 | return customError{errorType: errorType, originalError: errors.New(msg)} 35 | } 36 | 37 | // New creates a new customError with formatted message 38 | func (errorType ErrorType) Newf(msg string, args ...interface{}) error { 39 | return customError{errorType: errorType, originalError: fmt.Errorf(msg, args...)} 40 | } 41 | 42 | // Wrap creates a new wrapped error 43 | func (errorType ErrorType) Wrap(err error, msg string) error { 44 | return errorType.Wrapf(err, msg) 45 | } 46 | 47 | // Wrap creates a new wrapped error with formatted message 48 | func (errorType ErrorType) Wrapf(err error, msg string, args ...interface{}) error { 49 | return customError{errorType: errorType, originalError: errors.Wrapf(err, msg, args...)} 50 | } 51 | 52 | // Error returns the mssage of a customError 53 | func (error customError) Error() string { 54 | return error.originalError.Error() 55 | } 56 | 57 | // New creates a no type error 58 | func New(msg string) error { 59 | return customError{errorType: NoType, originalError: errors.New(msg)} 60 | } 61 | 62 | // Newf creates a no type error with formatted message 63 | func Newf(msg string, args ...interface{}) error { 64 | return customError{errorType: NoType, originalError: errors.New(fmt.Sprintf(msg, args...))} 65 | } 66 | 67 | // Wrap an error with a string 68 | func Wrap(err error, msg string) error { 69 | return Wrapf(err, msg) 70 | } 71 | 72 | // Cause gives the original error 73 | func Cause(err error) error { 74 | return errors.Cause(err) 75 | } 76 | 77 | // Wrapf an error with format string 78 | func Wrapf(err error, msg string, args ...interface{}) 
error { 79 | wrappedError := errors.Wrapf(err, msg, args...) 80 | if customErr, ok := err.(customError); ok { 81 | return customError{ 82 | errorType: customErr.errorType, 83 | originalError: wrappedError, 84 | } 85 | } 86 | 87 | return customError{errorType: NoType, originalError: wrappedError} 88 | } 89 | 90 | // GetType returns the error type 91 | func GetType(err error) ErrorType { 92 | if customErr, ok := err.(customError); ok { 93 | return customErr.errorType 94 | } 95 | 96 | return NoType 97 | } 98 | -------------------------------------------------------------------------------- /pkg/controller/heal/clustersplit.go: -------------------------------------------------------------------------------- 1 | package heal 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "k8s.io/apimachinery/pkg/util/errors" 8 | 9 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 10 | "github.com/ucloud/redis-cluster-operator/pkg/config" 11 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 12 | ) 13 | 14 | // FixClusterSplit use to detect and fix Cluster split 15 | func (c *CheckAndHeal) FixClusterSplit(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin, config *config.Redis) (bool, error) { 16 | clusters := buildClustersLists(infos) 17 | 18 | if len(clusters) > 1 { 19 | if c.DryRun { 20 | return true, nil 21 | } 22 | return true, c.reassignClusters(admin, config, clusters) 23 | } 24 | c.Logger.V(3).Info("[Check] No split cluster detected") 25 | return false, nil 26 | } 27 | 28 | type cluster []string 29 | 30 | func (c *CheckAndHeal) reassignClusters(admin redisutil.IAdmin, config *config.Redis, clusters []cluster) error { 31 | c.Logger.Info("[Check] Cluster split detected, the Redis manager will recover from the issue, but data may be lost") 32 | var errs []error 33 | // only one cluster may remain 34 | mainCluster, badClusters := splitMainCluster(clusters) 35 | if len(mainCluster) == 0 { 36 | 
c.Logger.Error(nil, "[Check] Impossible to fix cluster split, cannot elect main cluster") 37 | return fmt.Errorf("impossible to fix cluster split, cannot elect main cluster") 38 | } 39 | c.Logger.Info("[Check] Cluster is elected as main cluster", "Cluster", mainCluster) 40 | // reset admin to connect to the correct cluster 41 | admin.Connections().ReplaceAll(mainCluster) 42 | 43 | // reconfigure bad clusters 44 | for _, cluster := range badClusters { 45 | c.Logger.Info(fmt.Sprintf("[Check] All keys stored in redis cluster '%s' will be lost", cluster)) 46 | clusterAdmin := redisutil.NewAdmin(cluster, 47 | &redisutil.AdminOptions{ 48 | ConnectionTimeout: time.Duration(config.DialTimeout) * time.Millisecond, 49 | RenameCommandsFile: config.GetRenameCommandsFile(), 50 | }, c.Logger) 51 | for _, nodeAddr := range cluster { 52 | if err := clusterAdmin.FlushAndReset(nodeAddr, redisutil.ResetHard); err != nil { 53 | c.Logger.Error(err, "unable to flush the node", "node", nodeAddr) 54 | errs = append(errs, err) 55 | } 56 | if err := admin.AttachNodeToCluster(nodeAddr); err != nil { 57 | c.Logger.Error(err, "unable to attach the node", "node", nodeAddr) 58 | errs = append(errs, err) 59 | } 60 | 61 | } 62 | clusterAdmin.Close() 63 | } 64 | 65 | return errors.NewAggregate(errs) 66 | } 67 | 68 | func splitMainCluster(clusters []cluster) (cluster, []cluster) { 69 | if len(clusters) == 0 { 70 | return cluster{}, []cluster{} 71 | } 72 | // only the bigger cluster is kept, or the first one if several cluster have the same size 73 | maincluster := -1 74 | maxSize := 0 75 | for i, c := range clusters { 76 | if len(c) > maxSize { 77 | maxSize = len(c) 78 | maincluster = i 79 | } 80 | } 81 | if maincluster != -1 { 82 | main := clusters[maincluster] 83 | return main, append(clusters[:maincluster], clusters[maincluster+1:]...) 
84 | } 85 | return clusters[0], []cluster{} 86 | } 87 | 88 | // buildClustersLists build a list of independant clusters 89 | // we could have cluster partially overlapping in case of inconsistent cluster view 90 | func buildClustersLists(infos *redisutil.ClusterInfos) []cluster { 91 | clusters := []cluster{} 92 | for _, nodeinfos := range infos.Infos { 93 | if nodeinfos == nil || nodeinfos.Node == nil { 94 | continue 95 | } 96 | slice := append(nodeinfos.Friends, nodeinfos.Node) 97 | var c cluster 98 | // build list of addresses 99 | for _, node := range slice { 100 | if len(node.FailStatus) == 0 { 101 | c = append(c, node.IPPort()) 102 | } 103 | } 104 | // check if this cluster overlap with another 105 | overlap := false 106 | for _, node := range c { 107 | if findInCluster(node, clusters) { 108 | overlap = true 109 | break 110 | } 111 | } 112 | // if this is a new cluster, add it 113 | if !overlap { 114 | clusters = append(clusters, c) 115 | } 116 | } 117 | return clusters 118 | } 119 | 120 | func findInCluster(addr string, clusters []cluster) bool { 121 | for _, c := range clusters { 122 | for _, nodeAddr := range c { 123 | if addr == nodeAddr { 124 | return true 125 | } 126 | } 127 | } 128 | return false 129 | } 130 | -------------------------------------------------------------------------------- /pkg/controller/heal/clustersplit_test.go: -------------------------------------------------------------------------------- 1 | package heal 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 7 | ) 8 | 9 | func Test_buildClustersLists(t *testing.T) { 10 | // In the test below, we cannot directly use initialize redisutil.NodeSlice in redisutil.NodeInfos, this is a go vet issue: https://github.com/golang/go/issues/9171 11 | ip1 := redisutil.Nodes{{IP: "ip1", Port: "1234"}} 12 | ip2 := redisutil.Nodes{{IP: "ip2", Port: "1234"}} 13 | ip56 := redisutil.Nodes{{IP: "ip5", Port: "1234"}, {IP: "ip6", Port: "1234"}} 14 | ip64 := 
redisutil.Nodes{{IP: "ip6", Port: "1234"}, {IP: "ip4", Port: "1234"}} 15 | ip54 := redisutil.Nodes{{IP: "ip5", Port: "1234"}, {IP: "ip4", Port: "1234"}} 16 | // end of workaround 17 | testCases := []struct { 18 | input *redisutil.ClusterInfos 19 | output []cluster 20 | }{ //several partilly different cannot happen, so not tested 21 | { // empty 22 | input: &redisutil.ClusterInfos{Infos: map[string]*redisutil.NodeInfos{}, Status: redisutil.ClusterInfosConsistent}, 23 | output: []cluster{}, 24 | }, 25 | { // one node 26 | input: &redisutil.ClusterInfos{Infos: map[string]*redisutil.NodeInfos{"ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: redisutil.Nodes{}}}, Status: redisutil.ClusterInfosConsistent}, 27 | output: []cluster{{"ip1:1234"}}, 28 | }, 29 | { // no discrepency 30 | input: &redisutil.ClusterInfos{ 31 | Infos: map[string]*redisutil.NodeInfos{ 32 | "ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: ip2}, 33 | "ip2:1234": {Node: &redisutil.Node{IP: "ip2", Port: "1234"}, Friends: ip1}, 34 | }, 35 | Status: redisutil.ClusterInfosConsistent, 36 | }, 37 | output: []cluster{{"ip1:1234", "ip2:1234"}}, 38 | }, 39 | { // several decorelated 40 | input: &redisutil.ClusterInfos{ 41 | Infos: map[string]*redisutil.NodeInfos{ 42 | "ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: ip2}, 43 | "ip2:1234": {Node: &redisutil.Node{IP: "ip2", Port: "1234"}, Friends: ip1}, 44 | "ip3:1234": {Node: &redisutil.Node{IP: "ip3", Port: "1234"}, Friends: redisutil.Nodes{}}, 45 | "ip4:1234": {Node: &redisutil.Node{IP: "ip4", Port: "1234"}, Friends: ip56}, 46 | "ip5:1234": {Node: &redisutil.Node{IP: "ip5", Port: "1234"}, Friends: ip64}, 47 | "ip6:1234": {Node: &redisutil.Node{IP: "ip6", Port: "1234"}, Friends: ip54}, 48 | }, 49 | Status: redisutil.ClusterInfosInconsistent, 50 | }, 51 | output: []cluster{{"ip1:1234", "ip2:1234"}, {"ip3:1234"}, {"ip4:1234", "ip5:1234", "ip6:1234"}}, 52 | }, 53 | { // empty ignored 54 | input: 
&redisutil.ClusterInfos{ 55 | Infos: map[string]*redisutil.NodeInfos{ 56 | "ip1:1234": {Node: &redisutil.Node{IP: "ip1", Port: "1234"}, Friends: ip2}, 57 | "ip2:1234": {Node: &redisutil.Node{IP: "ip2", Port: "1234"}, Friends: ip1}, 58 | "ip3:1234": nil, 59 | }, 60 | Status: redisutil.ClusterInfosInconsistent, 61 | }, 62 | output: []cluster{{"ip1:1234", "ip2:1234"}}, 63 | }, 64 | } 65 | 66 | for i, tc := range testCases { 67 | output := buildClustersLists(tc.input) 68 | // because we work with map, order might not be conserved 69 | if !compareClusters(output, tc.output) { 70 | t.Errorf("[Case %d] Unexpected result for buildClustersLists, expected %v, got %v", i, tc.output, output) 71 | } 72 | } 73 | } 74 | 75 | func compareClusters(c1, c2 []cluster) bool { 76 | if len(c1) != len(c2) { 77 | return false 78 | } 79 | 80 | for _, c1elem := range c2 { 81 | found := false 82 | for _, c2elem := range c1 { 83 | if compareCluster(c1elem, c2elem) { 84 | found = true 85 | break 86 | } 87 | } 88 | if !found { 89 | return false 90 | } 91 | } 92 | 93 | return true 94 | } 95 | 96 | func compareCluster(c1, c2 cluster) bool { 97 | if len(c1) != len(c2) { 98 | return false 99 | } 100 | for _, c1elem := range c2 { 101 | found := false 102 | for _, c2elem := range c1 { 103 | if c1elem == c2elem { 104 | found = true 105 | break 106 | } 107 | } 108 | if !found { 109 | return false 110 | } 111 | } 112 | 113 | return true 114 | } 115 | -------------------------------------------------------------------------------- /pkg/controller/heal/failednodes.go: -------------------------------------------------------------------------------- 1 | package heal 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/util/errors" 5 | 6 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 7 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 8 | ) 9 | 10 | // FixFailedNodes fix failed nodes: in some cases (cluster without enough master after crash or scale down), some nodes may 
still know about fail nodes 11 | func (c *CheckAndHeal) FixFailedNodes(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin) (bool, error) { 12 | forgetSet := listGhostNodes(cluster, infos) 13 | var errs []error 14 | doneAnAction := false 15 | for id := range forgetSet { 16 | doneAnAction = true 17 | c.Logger.Info("[FixFailedNodes] Forgetting failed node, this command might fail, this is not an error", "node", id) 18 | if !c.DryRun { 19 | c.Logger.Info("[FixFailedNodes] try to forget node", "nodeId", id) 20 | if err := admin.ForgetNode(id); err != nil { 21 | errs = append(errs, err) 22 | } 23 | } 24 | } 25 | 26 | return doneAnAction, errors.NewAggregate(errs) 27 | } 28 | 29 | // listGhostNodes : A Ghost node is a node still known by some redis node but which doesn't exists anymore 30 | // meaning it is failed, and pod not in kubernetes, or without targetable IP 31 | func listGhostNodes(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos) map[string]bool { 32 | ghostNodesSet := map[string]bool{} 33 | if infos == nil || infos.Infos == nil { 34 | return ghostNodesSet 35 | } 36 | for _, nodeinfos := range infos.Infos { 37 | for _, node := range nodeinfos.Friends { 38 | // only forget it when no more part of kubernetes, or if noaddress 39 | if node.HasStatus(redisutil.NodeStatusNoAddr) { 40 | ghostNodesSet[node.ID] = true 41 | } 42 | if node.HasStatus(redisutil.NodeStatusFail) || node.HasStatus(redisutil.NodeStatusPFail) { 43 | found := false 44 | for _, pod := range cluster.Status.Nodes { 45 | if pod.ID == node.ID { 46 | found = true 47 | } 48 | } 49 | if !found { 50 | ghostNodesSet[node.ID] = true 51 | } 52 | } 53 | } 54 | } 55 | return ghostNodesSet 56 | } 57 | -------------------------------------------------------------------------------- /pkg/controller/heal/heal.go: -------------------------------------------------------------------------------- 1 | package heal 2 | 3 | import ( 4 | 
"github.com/go-logr/logr" 5 | corev1 "k8s.io/api/core/v1" 6 | 7 | "github.com/ucloud/redis-cluster-operator/pkg/k8sutil" 8 | ) 9 | 10 | type CheckAndHeal struct { 11 | Logger logr.Logger 12 | PodControl k8sutil.IPodControl 13 | Pods []*corev1.Pod 14 | DryRun bool 15 | } 16 | -------------------------------------------------------------------------------- /pkg/controller/heal/terminatingpod.go: -------------------------------------------------------------------------------- 1 | package heal 2 | 3 | import ( 4 | "time" 5 | 6 | "k8s.io/apimachinery/pkg/util/errors" 7 | 8 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 9 | ) 10 | 11 | // FixTerminatingPods used to for the deletion of pod blocked in terminating status. 12 | // in it append the this method will for the deletion of the Pod. 13 | func (c *CheckAndHeal) FixTerminatingPods(cluster *redisv1alpha1.DistributedRedisCluster, maxDuration time.Duration) (bool, error) { 14 | var errs []error 15 | var actionDone bool 16 | 17 | if maxDuration == time.Duration(0) { 18 | return actionDone, nil 19 | } 20 | 21 | now := time.Now() 22 | for _, pod := range c.Pods { 23 | if pod.DeletionTimestamp == nil { 24 | // ignore pod without deletion timestamp 25 | continue 26 | } 27 | maxTime := pod.DeletionTimestamp.Add(maxDuration) // adding MaxDuration for configuration 28 | if maxTime.Before(now) { 29 | c.Logger.Info("[FixTerminatingPods] found deletion pod", "podName", pod.Name) 30 | actionDone = true 31 | // it means that this pod should already been deleted since a wild 32 | if !c.DryRun { 33 | c.Logger.Info("[FixTerminatingPods] try to delete pod", "podName", pod.Name) 34 | if err := c.PodControl.DeletePodByName(cluster.Namespace, pod.Name); err != nil { 35 | errs = append(errs, err) 36 | } 37 | } 38 | } 39 | } 40 | 41 | return actionDone, errors.NewAggregate(errs) 42 | } 43 | -------------------------------------------------------------------------------- 
/pkg/controller/heal/untrustenodes.go: -------------------------------------------------------------------------------- 1 | package heal 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | "k8s.io/apimachinery/pkg/util/errors" 6 | 7 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 8 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 9 | ) 10 | 11 | // FixUntrustedNodes used to remove Nodes that are not trusted by other nodes. It can append when a node 12 | // are removed from the cluster (with the "forget nodes" command) but try to rejoins the cluster. 13 | func (c *CheckAndHeal) FixUntrustedNodes(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin) (bool, error) { 14 | untrustedNode := listUntrustedNodes(infos) 15 | var errs []error 16 | doneAnAction := false 17 | 18 | for id, uNode := range untrustedNode { 19 | c.Logger.Info("[FixUntrustedNodes] found untrust node", "node", uNode) 20 | getByIPFunc := func(n *redisutil.Node) bool { 21 | if n.IP == uNode.IP && n.ID != uNode.ID { 22 | return true 23 | } 24 | return false 25 | } 26 | node2, err := infos.GetNodes().GetNodesByFunc(getByIPFunc) 27 | if err != nil && !redisutil.IsNodeNotFoundedError(err) { 28 | c.Logger.Error(err, "error with GetNodesByFunc(getByIPFunc) search function") 29 | errs = append(errs, err) 30 | continue 31 | } 32 | if len(node2) > 0 { 33 | // it means the POD is used by another Redis node ID so we should not delete the pod. 
34 | continue 35 | } 36 | exist, reused := checkIfPodNameExistAndIsReused(uNode, c.Pods) 37 | if exist && !reused { 38 | c.Logger.Info("[FixUntrustedNodes] try to delete pod", "podName", uNode.PodName) 39 | if err := c.PodControl.DeletePodByName(cluster.Namespace, uNode.PodName); err != nil { 40 | errs = append(errs, err) 41 | } 42 | } 43 | doneAnAction = true 44 | if !c.DryRun { 45 | c.Logger.Info("[FixUntrustedNodes] try to forget node", "nodeId", id) 46 | if err := admin.ForgetNode(id); err != nil { 47 | errs = append(errs, err) 48 | } 49 | } 50 | } 51 | 52 | return doneAnAction, errors.NewAggregate(errs) 53 | } 54 | 55 | func listUntrustedNodes(infos *redisutil.ClusterInfos) map[string]*redisutil.Node { 56 | untrustedNodes := make(map[string]*redisutil.Node) 57 | if infos == nil || infos.Infos == nil { 58 | return untrustedNodes 59 | } 60 | for _, nodeinfos := range infos.Infos { 61 | for _, node := range nodeinfos.Friends { 62 | if node.HasStatus(redisutil.NodeStatusHandshake) { 63 | if _, found := untrustedNodes[node.ID]; !found { 64 | untrustedNodes[node.ID] = node 65 | } 66 | } 67 | } 68 | } 69 | return untrustedNodes 70 | } 71 | 72 | func checkIfPodNameExistAndIsReused(node *redisutil.Node, podlist []*corev1.Pod) (exist bool, reused bool) { 73 | if node.PodName == "" { 74 | return 75 | } 76 | for _, currentPod := range podlist { 77 | if currentPod.Name == node.PodName { 78 | exist = true 79 | if currentPod.Status.PodIP == node.IP { 80 | // this check is use to see if the Pod name is not use by another RedisNode. 81 | // for that we check the the Pod name from the Redis node is not used by another 82 | // Redis node, by comparing the IP of the current Pod with the Pod from the cluster bom. 
83 | // if the Pod IP and Name from the redis info is equal to the IP/NAME from the getPod; it 84 | // means that the Pod is still use and the Redis Node is not a ghost 85 | reused = true 86 | break 87 | } 88 | } 89 | } 90 | return 91 | } 92 | -------------------------------------------------------------------------------- /pkg/controller/manager/checker.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "fmt" 5 | 6 | appsv1 "k8s.io/api/apps/v1" 7 | "sigs.k8s.io/controller-runtime/pkg/client" 8 | 9 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 10 | "github.com/ucloud/redis-cluster-operator/pkg/k8sutil" 11 | "github.com/ucloud/redis-cluster-operator/pkg/resources/statefulsets" 12 | ) 13 | 14 | type ICheck interface { 15 | CheckRedisNodeNum(*redisv1alpha1.DistributedRedisCluster) error 16 | //CheckRedisMasterNum(*redisv1alpha1.DistributedRedisCluster) error 17 | } 18 | 19 | type realCheck struct { 20 | statefulSetClient k8sutil.IStatefulSetControl 21 | } 22 | 23 | func NewCheck(client client.Client) ICheck { 24 | return &realCheck{ 25 | statefulSetClient: k8sutil.NewStatefulSetController(client), 26 | } 27 | } 28 | 29 | func (c *realCheck) CheckRedisNodeNum(cluster *redisv1alpha1.DistributedRedisCluster) error { 30 | for i := 0; i < int(cluster.Spec.MasterSize); i++ { 31 | name := statefulsets.ClusterStatefulSetName(cluster.Name, i) 32 | expectNodeNum := cluster.Spec.ClusterReplicas + 1 33 | ss, err := c.statefulSetClient.GetStatefulSet(cluster.Namespace, name) 34 | if err != nil { 35 | return err 36 | } 37 | if err := c.checkRedisNodeNum(expectNodeNum, ss); err != nil { 38 | return err 39 | } 40 | } 41 | 42 | return nil 43 | } 44 | 45 | func (c *realCheck) checkRedisNodeNum(expectNodeNum int32, ss *appsv1.StatefulSet) error { 46 | if expectNodeNum != *ss.Spec.Replicas { 47 | return fmt.Errorf("number of redis pods is different from specification") 48 | } 
49 | if expectNodeNum != ss.Status.ReadyReplicas { 50 | return fmt.Errorf("redis pods are not all ready") 51 | } 52 | if expectNodeNum != ss.Status.CurrentReplicas { 53 | return fmt.Errorf("redis pods need to be updated") 54 | } 55 | 56 | return nil 57 | } 58 | 59 | func (c *realCheck) CheckRedisMasterNum(cluster *redisv1alpha1.DistributedRedisCluster) error { 60 | if cluster.Spec.MasterSize != cluster.Status.NumberOfMaster { 61 | return fmt.Errorf("number of redis master different from specification") 62 | } 63 | return nil 64 | } 65 | 66 | // 67 | //func (c *realCheck) CheckRedisClusterIsEmpty(cluster *redisv1alpha1.DistributedRedisCluster, admin redisutil.IAdmin) (bool, error) { 68 | // 69 | //} 70 | -------------------------------------------------------------------------------- /pkg/controller/manager/ensurer_test.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/go-logr/logr" 7 | logf "sigs.k8s.io/controller-runtime/pkg/log" 8 | ) 9 | 10 | var log = logf.Log.WithName("test") 11 | 12 | func Test_isRedisConfChanged(t *testing.T) { 13 | type args struct { 14 | confInCm string 15 | currentConf map[string]string 16 | log logr.Logger 17 | } 18 | tests := []struct { 19 | name string 20 | args args 21 | want bool 22 | }{ 23 | { 24 | name: "should false", 25 | args: args{ 26 | confInCm: `appendfsync everysec 27 | appendonly yes 28 | auto-aof-rewrite-min-size 67108864 29 | save 900 1 300 10`, 30 | currentConf: map[string]string{ 31 | "appendfsync": "everysec", 32 | "appendonly": "yes", 33 | "auto-aof-rewrite-min-size": "67108864", 34 | "save": "900 1 300 10", 35 | }, 36 | log: log, 37 | }, 38 | want: false, 39 | }, 40 | { 41 | name: "should false with newline", 42 | args: args{ 43 | confInCm: `appendfsync everysec 44 | appendonly yes 45 | auto-aof-rewrite-min-size 67108864 46 | save 900 1 300 10 47 | `, 48 | currentConf: map[string]string{ 49 | "appendfsync": 
"everysec", 50 | "appendonly": "yes", 51 | "auto-aof-rewrite-min-size": "67108864", 52 | "save": "900 1 300 10", 53 | }, 54 | log: log, 55 | }, 56 | want: false, 57 | }, 58 | { 59 | name: "should true, compare value", 60 | args: args{ 61 | confInCm: `appendfsync everysec 62 | appendonly yes 63 | auto-aof-rewrite-min-size 6710886 64 | save 900 1 300 10 65 | `, 66 | currentConf: map[string]string{ 67 | "appendfsync": "everysec", 68 | "appendonly": "yes", 69 | "auto-aof-rewrite-min-size": "67108864", 70 | "save": "900 1 300 10", 71 | }, 72 | log: log, 73 | }, 74 | want: true, 75 | }, 76 | { 77 | name: "should true, add current", 78 | args: args{ 79 | confInCm: `appendfsync everysec 80 | appendonly yes 81 | save 900 1 300 10 82 | `, 83 | currentConf: map[string]string{ 84 | "appendfsync": "everysec", 85 | "appendonly": "yes", 86 | "auto-aof-rewrite-min-size": "67108864", 87 | "save": "900 1 300 10", 88 | }, 89 | log: log, 90 | }, 91 | want: true, 92 | }, 93 | { 94 | name: "should true, del current", 95 | args: args{ 96 | confInCm: `appendfsync everysec 97 | appendonly yes 98 | auto-aof-rewrite-min-size 67108864 99 | save 900 1 300 10 100 | `, 101 | currentConf: map[string]string{ 102 | "appendfsync": "everysec", 103 | "appendonly": "yes", 104 | "save": "900 1 300 10", 105 | }, 106 | log: log, 107 | }, 108 | want: true, 109 | }, 110 | { 111 | name: "should true, compare key", 112 | args: args{ 113 | confInCm: `appendfsync everysec 114 | appendonly yes 115 | save 900 1 300 10 116 | `, 117 | currentConf: map[string]string{ 118 | "appendonly": "yes", 119 | "auto-aof-rewrite-min-size": "67108864", 120 | "save": "900 1 300 10", 121 | }, 122 | log: log, 123 | }, 124 | want: true, 125 | }, 126 | { 127 | name: "should true, compare save", 128 | args: args{ 129 | confInCm: `appendfsync everysec 130 | appendonly yes 131 | auto-aof-rewrite-min-size 67108864 132 | save 900 1 300 10 133 | `, 134 | currentConf: map[string]string{ 135 | "appendfsync": "everysec", 136 | "appendonly": 
"yes", 137 | "auto-aof-rewrite-min-size": "67108864", 138 | "save": "900 1", 139 | }, 140 | log: log, 141 | }, 142 | want: true, 143 | }, 144 | } 145 | for _, tt := range tests { 146 | t.Run(tt.name, func(t *testing.T) { 147 | if got := isRedisConfChanged(tt.args.confInCm, tt.args.currentConf, tt.args.log); got != tt.want { 148 | t.Errorf("isRedisConfChanged() = %v, want %v", got, tt.want) 149 | } 150 | }) 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /pkg/controller/manager/healer.go: -------------------------------------------------------------------------------- 1 | package manager 2 | 3 | import ( 4 | "time" 5 | 6 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 7 | "github.com/ucloud/redis-cluster-operator/pkg/controller/heal" 8 | "github.com/ucloud/redis-cluster-operator/pkg/redisutil" 9 | ) 10 | 11 | type IHeal interface { 12 | Heal(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin) (bool, error) 13 | FixTerminatingPods(cluster *redisv1alpha1.DistributedRedisCluster, maxDuration time.Duration) (bool, error) 14 | } 15 | 16 | type realHeal struct { 17 | *heal.CheckAndHeal 18 | } 19 | 20 | func NewHealer(heal *heal.CheckAndHeal) IHeal { 21 | return &realHeal{heal} 22 | } 23 | 24 | func (h *realHeal) Heal(cluster *redisv1alpha1.DistributedRedisCluster, infos *redisutil.ClusterInfos, admin redisutil.IAdmin) (bool, error) { 25 | if actionDone, err := h.FixFailedNodes(cluster, infos, admin); err != nil { 26 | return actionDone, err 27 | } else if actionDone { 28 | return actionDone, nil 29 | } 30 | 31 | if actionDone, err := h.FixUntrustedNodes(cluster, infos, admin); err != nil { 32 | return actionDone, err 33 | } else if actionDone { 34 | return actionDone, nil 35 | } 36 | return false, nil 37 | } 38 | -------------------------------------------------------------------------------- 
/pkg/controller/redisclusterbackup/helper.go: -------------------------------------------------------------------------------- 1 | package redisclusterbackup 2 | 3 | import ( 4 | "context" 5 | 6 | batch "k8s.io/api/batch/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/client-go/rest" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | 12 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 13 | ) 14 | 15 | func (r *ReconcileRedisClusterBackup) markAsFailedBackup(backup *redisv1alpha1.RedisClusterBackup, 16 | reason string) error { 17 | t := metav1.Now() 18 | backup.Status.CompletionTime = &t 19 | backup.Status.Phase = redisv1alpha1.BackupPhaseFailed 20 | backup.Status.Reason = reason 21 | return r.crController.UpdateCRStatus(backup) 22 | } 23 | 24 | func (r *ReconcileRedisClusterBackup) markAsIgnoredBackup(backup *redisv1alpha1.RedisClusterBackup, 25 | reason string) error { 26 | t := metav1.Now() 27 | backup.Status.CompletionTime = &t 28 | backup.Status.Phase = redisv1alpha1.BackupPhaseIgnored 29 | backup.Status.Reason = reason 30 | return r.crController.UpdateCRStatus(backup) 31 | } 32 | 33 | func (r *ReconcileRedisClusterBackup) isBackupRunning(backup *redisv1alpha1.RedisClusterBackup) (bool, error) { 34 | labMap := client.MatchingLabels{ 35 | redisv1alpha1.LabelBackupStatus: string(redisv1alpha1.BackupPhaseRunning), 36 | redisv1alpha1.LabelClusterName: backup.Spec.RedisClusterName, 37 | } 38 | backupList := &redisv1alpha1.RedisClusterBackupList{} 39 | opts := []client.ListOption{ 40 | client.InNamespace(backup.Namespace), 41 | labMap, 42 | } 43 | err := r.client.List(context.TODO(), backupList, opts...) 
44 | if err != nil { 45 | return false, err 46 | } 47 | 48 | jobLabMap := client.MatchingLabels{ 49 | redisv1alpha1.LabelClusterName: backup.Spec.RedisClusterName, 50 | redisv1alpha1.AnnotationJobType: redisv1alpha1.JobTypeBackup, 51 | } 52 | backupJobList, err := r.jobController.ListJobByLabels(backup.Namespace, jobLabMap) 53 | if err != nil { 54 | return false, err 55 | } 56 | 57 | if len(backupList.Items) > 0 && len(backupJobList.Items) > 0 { 58 | return true, nil 59 | } 60 | 61 | return false, nil 62 | } 63 | 64 | func upsertEnvVars(vars []corev1.EnvVar, nv ...corev1.EnvVar) []corev1.EnvVar { 65 | upsert := func(env corev1.EnvVar) { 66 | for i, v := range vars { 67 | if v.Name == env.Name { 68 | vars[i] = env 69 | return 70 | } 71 | } 72 | vars = append(vars, env) 73 | } 74 | 75 | for _, env := range nv { 76 | upsert(env) 77 | } 78 | return vars 79 | } 80 | 81 | // Returns the REDIS_PASSWORD environment variable. 82 | func redisPassword(cluster *redisv1alpha1.DistributedRedisCluster) corev1.EnvVar { 83 | secretName := cluster.Spec.PasswordSecret.Name 84 | return corev1.EnvVar{ 85 | Name: "REDIS_PASSWORD", 86 | ValueFrom: &corev1.EnvVarSource{ 87 | SecretKeyRef: &corev1.SecretKeySelector{ 88 | LocalObjectReference: corev1.LocalObjectReference{ 89 | Name: secretName, 90 | }, 91 | Key: "password", 92 | }, 93 | }, 94 | } 95 | } 96 | 97 | func newDirectClient(config *rest.Config) client.Client { 98 | c, err := client.New(config, client.Options{}) 99 | if err != nil { 100 | panic(err) 101 | } 102 | return c 103 | } 104 | 105 | func isJobFinished(j *batch.Job) bool { 106 | for _, c := range j.Status.Conditions { 107 | if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == corev1.ConditionTrue { 108 | return true 109 | } 110 | } 111 | return false 112 | } 113 | -------------------------------------------------------------------------------- /pkg/event/event.go: -------------------------------------------------------------------------------- 1 | 
package event

// Event reasons recorded on backup/cluster objects. The Error and Failed
// reasons were previously misspelled ("BakcupError"/"BakcupFailed");
// corrected here so emitted Kubernetes events read properly.
const (
	BackupError      string = "BackupError"
	BackupFailed     string = "BackupFailed"
	Starting         string = "Starting"
	Successful       string = "Successful"
	BackupSuccessful string = "SuccessfulBackup"
)
--------------------------------------------------------------------------------
/pkg/exec/exec.go:
--------------------------------------------------------------------------------
package exec

import (
	"bytes"
	"io"
	"net/url"
	"strings"

	"github.com/go-logr/logr"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// IExec is an injectable interface for running remote exec commands.
type IExec interface {
	// ExecCommandInPodSet exec cmd in pod set.
	ExecCommandInPodSet(podSet []*corev1.Pod, cmd ...string) error
}

// remoteExec executes commands in pod containers via the API server's
// exec subresource.
type remoteExec struct {
	restGVKClient rest.Interface
	logger        logr.Logger
	config        *rest.Config
}

// NewRemoteExec returns a new IExec which will exec remote cmd.
func NewRemoteExec(restGVKClient rest.Interface, config *rest.Config, logger logr.Logger) IExec {
	return &remoteExec{
		restGVKClient: restGVKClient,
		logger:        logger,
		config:        config,
	}
}

// ExecOptions passed to ExecWithOptions.
type ExecOptions struct {
	Command []string

	Namespace     string
	PodName       string
	ContainerName string

	Stdin         io.Reader
	CaptureStdout bool
	CaptureStderr bool
	// If false, whitespace in std{err,out} will be removed.
	PreserveWhitespace bool
}

// ExecCommandInPodSet implements IExec interface.
func (e *remoteExec) ExecCommandInPodSet(podSet []*corev1.Pod, cmd ...string) error {
	for _, pod := range podSet {
		// Run the command in the first container of each pod; abort on the
		// first failure. Stdout is discarded here.
		if _, err := e.ExecCommandInContainer(pod.Namespace, pod.Name, pod.Spec.Containers[0].Name, cmd...); err != nil {
			return err
		}
	}
	return nil
}

// ExecCommandInContainer executes a command in the specified container.
// Anything the command writes to stderr is logged; only stdout is returned.
func (e *remoteExec) ExecCommandInContainer(namespace, podName, containerName string, cmd ...string) (string, error) {
	stdout, stderr, err := e.ExecCommandInContainerWithFullOutput(namespace, podName, containerName, cmd...)
	if stderr != "" {
		e.logger.Info("ExecCommand", "command", cmd, "stderr", stderr)
	}
	return stdout, err
}

// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and return stdout, stderr and error. Output
// whitespace is trimmed (PreserveWhitespace is false).
func (e *remoteExec) ExecCommandInContainerWithFullOutput(namespace, podName, containerName string, cmd ...string) (string, string, error) {
	return e.ExecWithOptions(ExecOptions{
		Command:       cmd,
		Namespace:     namespace,
		PodName:       podName,
		ContainerName: containerName,

		Stdin:              nil,
		CaptureStdout:      true,
		CaptureStderr:      true,
		PreserveWhitespace: false,
	})
}

// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allowed for
// additional parameters to be passed.
func (e *remoteExec) ExecWithOptions(options ExecOptions) (string, string, error) {
	const tty = false

	// Build the "exec" subresource request for the target pod/container.
	req := e.restGVKClient.Post().
		Resource("pods").
		Name(options.PodName).
		Namespace(options.Namespace).
		SubResource("exec").
		Param("container", options.ContainerName)

	req.VersionedParams(&corev1.PodExecOptions{
		Container: options.ContainerName,
		Command:   options.Command,
		Stdin:     options.Stdin != nil,
		Stdout:    options.CaptureStdout,
		Stderr:    options.CaptureStderr,
		TTY:       tty,
	}, scheme.ParameterCodec)

	var stdout, stderr bytes.Buffer
	err := execute("POST", req.URL(), e.config, options.Stdin, &stdout, &stderr, tty)

	if options.PreserveWhitespace {
		return stdout.String(), stderr.String(), err
	}
	return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err
}

// execute streams the exec request over SPDY, wiring the supplied stdin /
// stdout / stderr to the remote process.
func execute(method string, url *url.URL, config *rest.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
	exec, err := remotecommand.NewSPDYExecutor(config, method, url)
	if err != nil {
		return err
	}
	return exec.Stream(remotecommand.StreamOptions{
		Stdin:  stdin,
		Stdout: stdout,
		Stderr: stderr,
		Tty:    tty,
	})
}
--------------------------------------------------------------------------------
/pkg/k8sutil/batchjob.go:
--------------------------------------------------------------------------------
package k8sutil

import (
	"context"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// IJobControl defines the interface that is used to create, update, and delete Jobs.
type IJobControl interface {
	CreateJob(*batchv1.Job) error
	UpdateJob(*batchv1.Job) error
	DeleteJob(*batchv1.Job) error
	GetJob(namespace, name string) (*batchv1.Job, error)
	ListJobByLabels(namespace string, labs client.MatchingLabels) (*batchv1.JobList, error)
}

type JobController struct {
	client client.Client
}

// NewJobController creates a concrete implementation of the
// IJobControl.
26 | func NewJobController(client client.Client) IJobControl { 27 | return &JobController{client: client} 28 | } 29 | 30 | // CreateJob implement the IJobControl.Interface. 31 | func (j *JobController) CreateJob(job *batchv1.Job) error { 32 | return j.client.Create(context.TODO(), job) 33 | } 34 | 35 | // UpdateJob implement the IJobControl.Interface. 36 | func (j *JobController) UpdateJob(job *batchv1.Job) error { 37 | return j.client.Update(context.TODO(), job) 38 | } 39 | 40 | // DeleteJob implement the IJobControl.Interface. 41 | func (j *JobController) DeleteJob(job *batchv1.Job) error { 42 | return j.client.Delete(context.TODO(), job) 43 | } 44 | 45 | // GetJob implement the IJobControl.Interface. 46 | func (j *JobController) GetJob(namespace, name string) (*batchv1.Job, error) { 47 | job := &batchv1.Job{} 48 | err := j.client.Get(context.TODO(), types.NamespacedName{ 49 | Name: name, 50 | Namespace: namespace, 51 | }, job) 52 | return job, err 53 | } 54 | 55 | func (j *JobController) ListJobByLabels(namespace string, labs client.MatchingLabels) (*batchv1.JobList, error) { 56 | jobList := &batchv1.JobList{} 57 | opts := []client.ListOption{ 58 | client.InNamespace(namespace), 59 | labs, 60 | } 61 | err := j.client.List(context.TODO(), jobList, opts...) 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | return jobList, nil 67 | } 68 | -------------------------------------------------------------------------------- /pkg/k8sutil/configmap.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/types" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | // IConfigMapControl defines the interface that uses to create, update, and delete ConfigMaps. 12 | type IConfigMapControl interface { 13 | // CreateConfigMap creates a ConfigMap in a DistributedRedisCluster. 
14 | CreateConfigMap(*corev1.ConfigMap) error 15 | // UpdateConfigMap updates a ConfigMap in a DistributedRedisCluster. 16 | UpdateConfigMap(*corev1.ConfigMap) error 17 | // DeleteConfigMap deletes a ConfigMap in a DistributedRedisCluster. 18 | DeleteConfigMap(*corev1.ConfigMap) error 19 | // GetConfigMap get ConfigMap in a DistributedRedisCluster. 20 | GetConfigMap(namespace, name string) (*corev1.ConfigMap, error) 21 | } 22 | 23 | type ConfigMapController struct { 24 | client client.Client 25 | } 26 | 27 | // NewRealConfigMapControl creates a concrete implementation of the 28 | // IConfigMapControl. 29 | func NewConfigMapController(client client.Client) IConfigMapControl { 30 | return &ConfigMapController{client: client} 31 | } 32 | 33 | // CreateConfigMap implement the IConfigMapControl.Interface. 34 | func (s *ConfigMapController) CreateConfigMap(cm *corev1.ConfigMap) error { 35 | return s.client.Create(context.TODO(), cm) 36 | } 37 | 38 | // UpdateConfigMap implement the IConfigMapControl.Interface. 39 | func (s *ConfigMapController) UpdateConfigMap(cm *corev1.ConfigMap) error { 40 | return s.client.Update(context.TODO(), cm) 41 | } 42 | 43 | // DeleteConfigMap implement the IConfigMapControl.Interface. 44 | func (s *ConfigMapController) DeleteConfigMap(cm *corev1.ConfigMap) error { 45 | return s.client.Delete(context.TODO(), cm) 46 | } 47 | 48 | // GetConfigMap implement the IConfigMapControl.Interface. 
func (s *ConfigMapController) GetConfigMap(namespace, name string) (*corev1.ConfigMap, error) {
	cm := &corev1.ConfigMap{}
	err := s.client.Get(context.TODO(), types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	}, cm)
	return cm, err
}
--------------------------------------------------------------------------------
/pkg/k8sutil/customresource.go:
--------------------------------------------------------------------------------
package k8sutil

import (
	"context"
	"k8s.io/apimachinery/pkg/types"

	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1"
)

// ICustomResource defines the interface used to read and update the
// operator's custom resources.
type ICustomResource interface {
	// UpdateCRStatus updates the status subresource of the given object.
	UpdateCRStatus(runtime.Object) error
	// UpdateCR updates the object itself (spec/metadata).
	UpdateCR(runtime.Object) error
	// GetRedisClusterBackup fetches a RedisClusterBackup by namespace/name.
	GetRedisClusterBackup(namespace, name string) (*redisv1alpha1.RedisClusterBackup, error)
	// GetDistributedRedisCluster fetches a DistributedRedisCluster by namespace/name.
	GetDistributedRedisCluster(namespace, name string) (*redisv1alpha1.DistributedRedisCluster, error)
}

type clusterControl struct {
	client client.Client
}

// NewCRControl creates a concrete implementation of the
// ICustomResource.
func NewCRControl(client client.Client) ICustomResource {
	return &clusterControl{client: client}
}

// UpdateCRStatus updates the status subresource of obj.
func (c *clusterControl) UpdateCRStatus(obj runtime.Object) error {
	return c.client.Status().Update(context.TODO(), obj)
}

// UpdateCR updates obj itself (spec/metadata).
func (c *clusterControl) UpdateCR(obj runtime.Object) error {
	return c.client.Update(context.TODO(), obj)
}

// GetRedisClusterBackup fetches the RedisClusterBackup namespace/name.
func (c *clusterControl) GetRedisClusterBackup(namespace, name string) (*redisv1alpha1.RedisClusterBackup, error) {
	backup := &redisv1alpha1.RedisClusterBackup{}
	if err := c.client.Get(context.TODO(), types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	}, backup); err != nil {
		return nil, err
	}
	return backup, nil
}

// GetDistributedRedisCluster fetches the DistributedRedisCluster namespace/name.
func (c *clusterControl) GetDistributedRedisCluster(namespace, name string) (*redisv1alpha1.DistributedRedisCluster, error) {
	drc := &redisv1alpha1.DistributedRedisCluster{}
	if err := c.client.Get(context.TODO(), types.NamespacedName{
		Name:      name,
		Namespace: namespace,
	}, drc); err != nil {
		return nil, err
	}
	return drc, nil
}
--------------------------------------------------------------------------------
/pkg/k8sutil/pod.go:
--------------------------------------------------------------------------------
package k8sutil

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// IPodControl defines the interface that uses to create, update, and delete Pods.
type IPodControl interface {
	// CreatePod creates a Pod in a DistributedRedisCluster.
	CreatePod(*corev1.Pod) error
	// UpdatePod updates a Pod in a DistributedRedisCluster.
	UpdatePod(*corev1.Pod) error
	// DeletePod deletes a Pod in a DistributedRedisCluster.
18 | DeletePod(*corev1.Pod) error 19 | DeletePodByName(namespace, name string) error 20 | // GetPod get Pod in a DistributedRedisCluster. 21 | GetPod(namespace, name string) (*corev1.Pod, error) 22 | } 23 | 24 | type PodController struct { 25 | client client.Client 26 | } 27 | 28 | // NewPodController creates a concrete implementation of the 29 | // IPodControl. 30 | func NewPodController(client client.Client) IPodControl { 31 | return &PodController{client: client} 32 | } 33 | 34 | // CreatePod implement the IPodControl.Interface. 35 | func (p *PodController) CreatePod(pod *corev1.Pod) error { 36 | return p.client.Create(context.TODO(), pod) 37 | } 38 | 39 | // UpdatePod implement the IPodControl.Interface. 40 | func (p *PodController) UpdatePod(pod *corev1.Pod) error { 41 | return p.client.Update(context.TODO(), pod) 42 | } 43 | 44 | // DeletePod implement the IPodControl.Interface. 45 | func (p *PodController) DeletePod(pod *corev1.Pod) error { 46 | return p.client.Delete(context.TODO(), pod) 47 | } 48 | 49 | // DeletePod implement the IPodControl.Interface. 50 | func (p *PodController) DeletePodByName(namespace, name string) error { 51 | pod, err := p.GetPod(namespace, name) 52 | if err != nil { 53 | return err 54 | } 55 | return p.client.Delete(context.TODO(), pod) 56 | } 57 | 58 | // GetPod implement the IPodControl.Interface. 
59 | func (p *PodController) GetPod(namespace, name string) (*corev1.Pod, error) { 60 | pod := &corev1.Pod{} 61 | err := p.client.Get(context.TODO(), types.NamespacedName{ 62 | Name: name, 63 | Namespace: namespace, 64 | }, pod) 65 | return pod, err 66 | } 67 | -------------------------------------------------------------------------------- /pkg/k8sutil/poddisruptionbudget.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "context" 5 | 6 | policyv1beta1 "k8s.io/api/policy/v1beta1" 7 | "k8s.io/apimachinery/pkg/types" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | // IPodDisruptionBudgetControl defines the interface that uses to create, update, and delete PodDisruptionBudgets. 12 | type IPodDisruptionBudgetControl interface { 13 | // CreatePodDisruptionBudget creates a PodDisruptionBudget in a DistributedRedisCluster. 14 | CreatePodDisruptionBudget(*policyv1beta1.PodDisruptionBudget) error 15 | // UpdatePodDisruptionBudget updates a PodDisruptionBudget in a DistributedRedisCluster. 16 | UpdatePodDisruptionBudget(*policyv1beta1.PodDisruptionBudget) error 17 | // DeletePodDisruptionBudget deletes a PodDisruptionBudget in a DistributedRedisCluster. 18 | DeletePodDisruptionBudget(*policyv1beta1.PodDisruptionBudget) error 19 | DeletePodDisruptionBudgetByName(namespace, name string) error 20 | // GetPodDisruptionBudget get PodDisruptionBudget in a DistributedRedisCluster. 21 | GetPodDisruptionBudget(namespace, name string) (*policyv1beta1.PodDisruptionBudget, error) 22 | } 23 | 24 | type PodDisruptionBudgetController struct { 25 | client client.Client 26 | } 27 | 28 | // NewRealPodDisruptionBudgetControl creates a concrete implementation of the 29 | // IPodDisruptionBudgetControl. 
30 | func NewPodDisruptionBudgetController(client client.Client) IPodDisruptionBudgetControl { 31 | return &PodDisruptionBudgetController{client: client} 32 | } 33 | 34 | // CreatePodDisruptionBudget implement the IPodDisruptionBudgetControl.Interface. 35 | func (s *PodDisruptionBudgetController) CreatePodDisruptionBudget(pb *policyv1beta1.PodDisruptionBudget) error { 36 | return s.client.Create(context.TODO(), pb) 37 | } 38 | 39 | // UpdatePodDisruptionBudget implement the IPodDisruptionBudgetControl.Interface. 40 | func (s *PodDisruptionBudgetController) UpdatePodDisruptionBudget(pb *policyv1beta1.PodDisruptionBudget) error { 41 | return s.client.Update(context.TODO(), pb) 42 | } 43 | 44 | // DeletePodDisruptionBudget implement the IPodDisruptionBudgetControl.Interface. 45 | func (s *PodDisruptionBudgetController) DeletePodDisruptionBudget(pb *policyv1beta1.PodDisruptionBudget) error { 46 | return s.client.Delete(context.TODO(), pb) 47 | } 48 | 49 | func (s *PodDisruptionBudgetController) DeletePodDisruptionBudgetByName(namespace, name string) error { 50 | pdb, err := s.GetPodDisruptionBudget(namespace, name) 51 | if err != nil { 52 | return err 53 | } 54 | return s.DeletePodDisruptionBudget(pdb) 55 | } 56 | 57 | // GetPodDisruptionBudget implement the IPodDisruptionBudgetControl.Interface. 
58 | func (s *PodDisruptionBudgetController) GetPodDisruptionBudget(namespace, name string) (*policyv1beta1.PodDisruptionBudget, error) { 59 | pb := &policyv1beta1.PodDisruptionBudget{} 60 | err := s.client.Get(context.TODO(), types.NamespacedName{ 61 | Name: name, 62 | Namespace: namespace, 63 | }, pb) 64 | return pb, err 65 | } 66 | -------------------------------------------------------------------------------- /pkg/k8sutil/pvc.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/types" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | // IPvcControl defines the interface that uses to create, update, and delete PersistentVolumeClaim. 12 | type IPvcControl interface { 13 | DeletePvc(claim *corev1.PersistentVolumeClaim) error 14 | DeletePvcByLabels(namespace string, labels map[string]string) error 15 | GetPvc(namespace, name string) (*corev1.PersistentVolumeClaim, error) 16 | } 17 | 18 | type pvcController struct { 19 | client client.Client 20 | } 21 | 22 | // NewPvcController creates a concrete implementation of the 23 | // IPvcControl. 24 | func NewPvcController(client client.Client) IPvcControl { 25 | return &pvcController{client: client} 26 | } 27 | 28 | // DeletePvc implement the IPvcControl.Interface. 
29 | func (s *pvcController) DeletePvc(pvc *corev1.PersistentVolumeClaim) error { 30 | return s.client.Delete(context.TODO(), pvc) 31 | } 32 | 33 | func (s *pvcController) DeletePvcByLabels(namespace string, labels map[string]string) error { 34 | foundPvcs := &corev1.PersistentVolumeClaimList{} 35 | err := s.client.List(context.TODO(), foundPvcs, client.InNamespace(namespace), client.MatchingLabels(labels)) 36 | if err != nil { 37 | return err 38 | } 39 | 40 | for _, pvc := range foundPvcs.Items { 41 | if err := s.client.Delete(context.TODO(), &pvc); err != nil { 42 | return err 43 | } 44 | } 45 | return nil 46 | } 47 | 48 | // GetPvc implement the IPvcControl.Interface. 49 | func (s *pvcController) GetPvc(namespace, name string) (*corev1.PersistentVolumeClaim, error) { 50 | pvc := &corev1.PersistentVolumeClaim{} 51 | err := s.client.Get(context.TODO(), types.NamespacedName{ 52 | Name: name, 53 | Namespace: namespace, 54 | }, pvc) 55 | return pvc, err 56 | } 57 | -------------------------------------------------------------------------------- /pkg/k8sutil/service.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/types" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | // IServiceControl defines the interface that uses to create, update, and delete Services. 12 | type IServiceControl interface { 13 | // CreateService creates a Service in a DistributedRedisCluster. 14 | CreateService(*corev1.Service) error 15 | // UpdateService updates a Service in a DistributedRedisCluster. 16 | UpdateService(*corev1.Service) error 17 | // DeleteService deletes a Service in a DistributedRedisCluster. 18 | DeleteService(*corev1.Service) error 19 | DeleteServiceByName(namespace, name string) error 20 | // GetService get Service in a DistributedRedisCluster. 
21 | GetService(namespace, name string) (*corev1.Service, error) 22 | } 23 | 24 | type serviceController struct { 25 | client client.Client 26 | } 27 | 28 | // NewRealServiceControl creates a concrete implementation of the 29 | // IServiceControl. 30 | func NewServiceController(client client.Client) IServiceControl { 31 | return &serviceController{client: client} 32 | } 33 | 34 | // CreateService implement the IServiceControl.Interface. 35 | func (s *serviceController) CreateService(svc *corev1.Service) error { 36 | return s.client.Create(context.TODO(), svc) 37 | } 38 | 39 | // UpdateService implement the IServiceControl.Interface. 40 | func (s *serviceController) UpdateService(svc *corev1.Service) error { 41 | return s.client.Update(context.TODO(), svc) 42 | } 43 | 44 | // DeleteService implement the IServiceControl.Interface. 45 | func (s *serviceController) DeleteService(svc *corev1.Service) error { 46 | return s.client.Delete(context.TODO(), svc) 47 | } 48 | 49 | func (s *serviceController) DeleteServiceByName(namespace, name string) error { 50 | svc, err := s.GetService(namespace, name) 51 | if err != nil { 52 | return err 53 | } 54 | return s.DeleteService(svc) 55 | } 56 | 57 | // GetService implement the IServiceControl.Interface. 
58 | func (s *serviceController) GetService(namespace, name string) (*corev1.Service, error) { 59 | svc := &corev1.Service{} 60 | err := s.client.Get(context.TODO(), types.NamespacedName{ 61 | Name: name, 62 | Namespace: namespace, 63 | }, svc) 64 | return svc, err 65 | } 66 | -------------------------------------------------------------------------------- /pkg/k8sutil/statefulset.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "context" 5 | 6 | appsv1 "k8s.io/api/apps/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/types" 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | ) 11 | 12 | // IStatefulSetControl defines the interface that uses to create, update, and delete StatefulSets. 13 | type IStatefulSetControl interface { 14 | // CreateStatefulSet creates a StatefulSet in a DistributedRedisCluster. 15 | CreateStatefulSet(*appsv1.StatefulSet) error 16 | // UpdateStatefulSet updates a StatefulSet in a DistributedRedisCluster. 17 | UpdateStatefulSet(*appsv1.StatefulSet) error 18 | // DeleteStatefulSet deletes a StatefulSet in a DistributedRedisCluster. 19 | DeleteStatefulSet(*appsv1.StatefulSet) error 20 | DeleteStatefulSetByName(namespace, name string) error 21 | // GetStatefulSet get StatefulSet in a DistributedRedisCluster. 22 | GetStatefulSet(namespace, name string) (*appsv1.StatefulSet, error) 23 | ListStatefulSetByLabels(namespace string, labels map[string]string) (*appsv1.StatefulSetList, error) 24 | // GetStatefulSetPods will retrieve the pods managed by a given StatefulSet. 25 | GetStatefulSetPods(namespace, name string) (*corev1.PodList, error) 26 | GetStatefulSetPodsByLabels(namespace string, labels map[string]string) (*corev1.PodList, error) 27 | } 28 | 29 | type stateFulSetController struct { 30 | client client.Client 31 | } 32 | 33 | // NewRealStatefulSetControl creates a concrete implementation of the 34 | // IStatefulSetControl. 
35 | func NewStatefulSetController(client client.Client) IStatefulSetControl { 36 | return &stateFulSetController{client: client} 37 | } 38 | 39 | // CreateStatefulSet implement the IStatefulSetControl.Interface. 40 | func (s *stateFulSetController) CreateStatefulSet(ss *appsv1.StatefulSet) error { 41 | return s.client.Create(context.TODO(), ss) 42 | } 43 | 44 | // UpdateStatefulSet implement the IStatefulSetControl.Interface. 45 | func (s *stateFulSetController) UpdateStatefulSet(ss *appsv1.StatefulSet) error { 46 | return s.client.Update(context.TODO(), ss) 47 | } 48 | 49 | // DeleteStatefulSet implement the IStatefulSetControl.Interface. 50 | func (s *stateFulSetController) DeleteStatefulSet(ss *appsv1.StatefulSet) error { 51 | return s.client.Delete(context.TODO(), ss) 52 | } 53 | 54 | func (s *stateFulSetController) DeleteStatefulSetByName(namespace, name string) error { 55 | sts, err := s.GetStatefulSet(namespace, name) 56 | if err != nil { 57 | return err 58 | } 59 | return s.DeleteStatefulSet(sts) 60 | } 61 | 62 | // GetStatefulSet implement the IStatefulSetControl.Interface. 63 | func (s *stateFulSetController) GetStatefulSet(namespace, name string) (*appsv1.StatefulSet, error) { 64 | statefulSet := &appsv1.StatefulSet{} 65 | err := s.client.Get(context.TODO(), types.NamespacedName{ 66 | Name: name, 67 | Namespace: namespace, 68 | }, statefulSet) 69 | return statefulSet, err 70 | } 71 | 72 | // GetStatefulSetPods implement the IStatefulSetControl.Interface. 
73 | func (s *stateFulSetController) GetStatefulSetPods(namespace, name string) (*corev1.PodList, error) { 74 | statefulSet, err := s.GetStatefulSet(namespace, name) 75 | if err != nil { 76 | return nil, err 77 | } 78 | 79 | match := make(client.MatchingLabels) 80 | for k, v := range statefulSet.Spec.Selector.MatchLabels { 81 | match[k] = v 82 | } 83 | foundPods := &corev1.PodList{} 84 | err = s.client.List(context.TODO(), foundPods, client.InNamespace(namespace), match) 85 | return foundPods, err 86 | } 87 | 88 | // GetStatefulSetPodsByLabels implement the IStatefulSetControl.Interface. 89 | func (s *stateFulSetController) GetStatefulSetPodsByLabels(namespace string, labels map[string]string) (*corev1.PodList, error) { 90 | foundPods := &corev1.PodList{} 91 | err := s.client.List(context.TODO(), foundPods, client.InNamespace(namespace), client.MatchingLabels(labels)) 92 | return foundPods, err 93 | } 94 | 95 | func (s *stateFulSetController) ListStatefulSetByLabels(namespace string, labels map[string]string) (*appsv1.StatefulSetList, error) { 96 | foundSts := &appsv1.StatefulSetList{} 97 | err := s.client.List(context.TODO(), foundSts, client.InNamespace(namespace), client.MatchingLabels(labels)) 98 | return foundSts, err 99 | } 100 | -------------------------------------------------------------------------------- /pkg/k8sutil/util.go: -------------------------------------------------------------------------------- 1 | package k8sutil 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-logr/logr" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | kerr "k8s.io/apimachinery/pkg/api/errors" 10 | "k8s.io/apimachinery/pkg/types" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | ) 13 | 14 | func IsRequestRetryable(err error) bool { 15 | return kerr.IsServiceUnavailable(err) || 16 | kerr.IsTimeout(err) || 17 | kerr.IsServerTimeout(err) || 18 | kerr.IsTooManyRequests(err) 19 | } 20 | 21 | func CreateSecret(client client.Client, secret 
*corev1.Secret, logger logr.Logger) error { 22 | ctx := context.TODO() 23 | s := &corev1.Secret{} 24 | err := client.Get(ctx, types.NamespacedName{ 25 | Namespace: secret.Namespace, 26 | Name: secret.Name, 27 | }, s) 28 | if err != nil { 29 | if errors.IsNotFound(err) { 30 | logger.WithValues("Secret.Namespace", secret.Namespace, "Secret.Name", secret.Name). 31 | Info("creating a new secret") 32 | return client.Create(ctx, secret) 33 | } 34 | } 35 | return err 36 | } 37 | -------------------------------------------------------------------------------- /pkg/osm/rclone.go: -------------------------------------------------------------------------------- 1 | package osm 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | core "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | ktypes "k8s.io/apimachinery/pkg/types" 10 | awsconst "kmodules.xyz/constants/aws" 11 | api "kmodules.xyz/objectstore-api/api/v1" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | // NewRcloneSecret creates a secret that contains the config file of Rclone. 
16 | // So, generally, if this secret is mounted in `etc/rclone`, 17 | // the tree of `/etc/rclone` directory will be similar to, 18 | // 19 | // /etc/rclone 20 | // └── config 21 | func NewRcloneSecret(kc client.Client, name, namespace string, spec api.Backend, ownerReference []metav1.OwnerReference) (*core.Secret, error) { 22 | rcloneCtx, err := newContext(kc, spec, namespace) 23 | if err != nil { 24 | return nil, err 25 | } 26 | 27 | rcloneBytes := []byte(rcloneCtx) 28 | 29 | out := &core.Secret{ 30 | ObjectMeta: metav1.ObjectMeta{ 31 | Name: name, 32 | Namespace: namespace, 33 | OwnerReferences: ownerReference, 34 | }, 35 | Data: map[string][]byte{ 36 | "config": rcloneBytes, 37 | }, 38 | } 39 | return out, nil 40 | } 41 | 42 | func newContext(kc client.Client, spec api.Backend, namespace string) (string, error) { 43 | config := make(map[string][]byte) 44 | if spec.StorageSecretName != "" { 45 | secret := &core.Secret{} 46 | err := kc.Get(context.TODO(), ktypes.NamespacedName{ 47 | Name: spec.StorageSecretName, 48 | Namespace: namespace, 49 | }, secret) 50 | if err != nil { 51 | return "", err 52 | } 53 | config = secret.Data 54 | } 55 | provider, err := spec.Provider() 56 | if err != nil { 57 | return "", err 58 | } 59 | 60 | if spec.S3 != nil { 61 | return cephContext(config, provider, spec), nil 62 | } 63 | if spec.Local != nil { 64 | return localContext(provider), nil 65 | } 66 | 67 | return "", fmt.Errorf("no storage provider is configured") 68 | } 69 | 70 | func cephContext(config map[string][]byte, provider string, spec api.Backend) string { 71 | keyID := config[awsconst.AWS_ACCESS_KEY_ID] 72 | key := config[awsconst.AWS_SECRET_ACCESS_KEY] 73 | 74 | return fmt.Sprintf(`[%s] 75 | type = s3 76 | provider = Ceph 77 | env_auth = false 78 | access_key_id = %s 79 | secret_access_key = %s 80 | region = 81 | endpoint = %s 82 | location_constraint = 83 | acl = 84 | server_side_encryption = 85 | storage_class = 86 | `, provider, keyID, key, spec.S3.Endpoint) 87 | 
} 88 | 89 | func localContext(provider string) string { 90 | return fmt.Sprintf(`[%s] 91 | type = local 92 | `, provider) 93 | } 94 | -------------------------------------------------------------------------------- /pkg/redisutil/client.go: -------------------------------------------------------------------------------- 1 | package redisutil 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/mediocregopher/radix.v2/redis" 8 | ) 9 | 10 | // IClient redis client interface 11 | type IClient interface { 12 | // Close closes the connection. 13 | Close() error 14 | 15 | // Cmd calls the given Redis command. 16 | Cmd(cmd string, args ...interface{}) *redis.Resp 17 | 18 | // PipeAppend adds the given call to the pipeline queue. 19 | // Use PipeResp() to read the response. 20 | PipeAppend(cmd string, args ...interface{}) 21 | 22 | // PipeResp returns the reply for the next request in the pipeline queue. Err 23 | // with ErrPipelineEmpty is returned if the pipeline queue is empty. 24 | PipeResp() *redis.Resp 25 | 26 | // PipeClear clears the contents of the current pipeline queue, both commands 27 | // queued by PipeAppend which have yet to be sent and responses which have yet 28 | // to be retrieved through PipeResp. The first returned int will be the number 29 | // of pending commands dropped, the second will be the number of pending 30 | // responses dropped 31 | PipeClear() (int, int) 32 | 33 | // ReadResp will read a Resp off of the connection without sending anything 34 | // first (useful after you've sent a SUSBSCRIBE command). This will block until 35 | // a reply is received or the timeout is reached (returning the IOErr). 
You can 36 | // use IsTimeout to check if the Resp is due to a Timeout 37 | // 38 | // Note: this is a more low-level function, you really shouldn't have to 39 | // actually use it unless you're writing your own pub/sub code 40 | ReadResp() *redis.Resp 41 | } 42 | 43 | // Client structure representing a client connection to redis 44 | type Client struct { 45 | commandsMapping map[string]string 46 | client *redis.Client 47 | } 48 | 49 | // NewClient build a client connection and connect to a redis address 50 | func NewClient(addr, password string, cnxTimeout time.Duration, commandsMapping map[string]string) (IClient, error) { 51 | var err error 52 | c := &Client{ 53 | commandsMapping: commandsMapping, 54 | } 55 | 56 | c.client, err = redis.DialTimeout("tcp", addr, cnxTimeout) 57 | if err != nil { 58 | return c, err 59 | } 60 | if password != "" { 61 | err = c.client.Cmd("AUTH", password).Err 62 | } 63 | return c, err 64 | } 65 | 66 | // Close closes the connection. 67 | func (c *Client) Close() error { 68 | return c.client.Close() 69 | } 70 | 71 | // Cmd calls the given Redis command. 72 | func (c *Client) Cmd(cmd string, args ...interface{}) *redis.Resp { 73 | return c.client.Cmd(c.getCommand(cmd), args) 74 | } 75 | 76 | // getCommand return the command name after applying rename-command 77 | func (c *Client) getCommand(cmd string) string { 78 | upperCmd := strings.ToUpper(cmd) 79 | if renamed, found := c.commandsMapping[upperCmd]; found { 80 | return renamed 81 | } 82 | return upperCmd 83 | } 84 | 85 | // PipeAppend adds the given call to the pipeline queue. 86 | func (c *Client) PipeAppend(cmd string, args ...interface{}) { 87 | c.client.PipeAppend(c.getCommand(cmd), args) 88 | } 89 | 90 | // PipeResp returns the reply for the next request in the pipeline queue. 
Err 91 | func (c *Client) PipeResp() *redis.Resp { 92 | return c.client.PipeResp() 93 | } 94 | 95 | // PipeClear clears the contents of the current pipeline queue 96 | func (c *Client) PipeClear() (int, int) { 97 | return c.client.PipeClear() 98 | } 99 | 100 | // ReadResp will read a Resp off of the connection without sending anything 101 | func (c *Client) ReadResp() *redis.Resp { 102 | return c.client.ReadResp() 103 | } 104 | -------------------------------------------------------------------------------- /pkg/redisutil/cluster.go: -------------------------------------------------------------------------------- 1 | package redisutil 2 | 3 | import ( 4 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 5 | ) 6 | 7 | // Cluster represents a Redis Cluster 8 | type Cluster struct { 9 | Name string 10 | Namespace string 11 | Nodes map[string]*Node 12 | Status redisv1alpha1.ClusterStatus 13 | NodesPlacement redisv1alpha1.NodesPlacementInfo 14 | ActionsInfo ClusterActionsInfo 15 | } 16 | 17 | // ClusterActionsInfo use to store information about current action on the Cluster 18 | type ClusterActionsInfo struct { 19 | NbslotsToMigrate int32 20 | } 21 | 22 | // NewCluster builds and returns new Cluster instance 23 | func NewCluster(name, namespace string) *Cluster { 24 | c := &Cluster{ 25 | Name: name, 26 | Namespace: namespace, 27 | Nodes: make(map[string]*Node), 28 | } 29 | 30 | return c 31 | } 32 | 33 | // AddNode used to add new Node in the cluster 34 | // if node with the same ID is already present in the cluster 35 | // the previous Node is replaced 36 | func (c *Cluster) AddNode(node *Node) { 37 | if n, ok := c.Nodes[node.ID]; ok { 38 | n.Clear() 39 | } 40 | 41 | c.Nodes[node.ID] = node 42 | } 43 | 44 | // GetNodeByID returns a Cluster Node by its ID 45 | // if not present in the cluster return an error 46 | func (c *Cluster) GetNodeByID(id string) (*Node, error) { 47 | if n, ok := c.Nodes[id]; ok { 48 | return n, nil 49 | } 50 | 
return nil, nodeNotFoundedError 51 | } 52 | 53 | // GetNodeByIP returns a Cluster Node by its ID 54 | // if not present in the cluster return an error 55 | func (c *Cluster) GetNodeByIP(ip string) (*Node, error) { 56 | findFunc := func(node *Node) bool { 57 | return node.IP == ip 58 | } 59 | 60 | return c.GetNodeByFunc(findFunc) 61 | } 62 | 63 | // GetNodeByPodName returns a Cluster Node by its Pod name 64 | // if not present in the cluster return an error 65 | func (c *Cluster) GetNodeByPodName(name string) (*Node, error) { 66 | findFunc := func(node *Node) bool { 67 | if node.PodName == name { 68 | return true 69 | } 70 | return false 71 | } 72 | 73 | return c.GetNodeByFunc(findFunc) 74 | } 75 | 76 | // GetNodeByFunc returns first node found by the FindNodeFunc 77 | func (c *Cluster) GetNodeByFunc(f FindNodeFunc) (*Node, error) { 78 | for _, n := range c.Nodes { 79 | if f(n) { 80 | return n, nil 81 | } 82 | } 83 | return nil, nodeNotFoundedError 84 | } 85 | 86 | // GetNodesByFunc returns first node found by the FindNodeFunc 87 | func (c *Cluster) GetNodesByFunc(f FindNodeFunc) (Nodes, error) { 88 | nodes := Nodes{} 89 | for _, n := range c.Nodes { 90 | if f(n) { 91 | nodes = append(nodes, n) 92 | } 93 | } 94 | if len(nodes) == 0 { 95 | return nodes, nodeNotFoundedError 96 | } 97 | return nodes, nil 98 | } 99 | 100 | // FindNodeFunc function for finding a Node 101 | // it is use as input for GetNodeByFunc and GetNodesByFunc 102 | type FindNodeFunc func(node *Node) bool 103 | 104 | // ToAPIClusterStatus convert the Cluster information to a api 105 | //func (c *Cluster) ToAPIClusterStatus() redisv1alpha1.RedisClusterStatus { 106 | // status := redisv1alpha1.RedisClusterClusterStatus{} 107 | // status.Status = c.Status 108 | // for _, node := range c.Nodes { 109 | // status.Nodes = append(status.Nodes, node.ToAPINode()) 110 | // } 111 | // return status 112 | //} 113 | -------------------------------------------------------------------------------- 
/pkg/redisutil/errors.go: -------------------------------------------------------------------------------- 1 | package redisutil 2 | 3 | import "fmt" 4 | 5 | // Error used to represent an error 6 | type Error string 7 | 8 | func (e Error) Error() string { return string(e) } 9 | 10 | // nodeNotFoundedError returns when a node is not present in the cluster 11 | const nodeNotFoundedError = Error("node not founded") 12 | 13 | // IsNodeNotFoundedError returns true if the current error is a NodeNotFoundedError 14 | func IsNodeNotFoundedError(err error) bool { 15 | return err == nodeNotFoundedError 16 | } 17 | 18 | // ClusterInfosError error type for redis cluster infos access 19 | type ClusterInfosError struct { 20 | errs map[string]error 21 | partial bool 22 | inconsistent bool 23 | } 24 | 25 | // NewClusterInfosError returns an instance of cluster infos error 26 | func NewClusterInfosError() ClusterInfosError { 27 | return ClusterInfosError{ 28 | errs: make(map[string]error), 29 | partial: false, 30 | inconsistent: false, 31 | } 32 | } 33 | 34 | // Error error string 35 | func (e ClusterInfosError) Error() string { 36 | s := "" 37 | if e.partial { 38 | s += "Cluster infos partial: " 39 | for addr, err := range e.errs { 40 | s += fmt.Sprintf("%s: '%s'", addr, err) 41 | } 42 | return s 43 | } 44 | if e.inconsistent { 45 | s += "Cluster view is inconsistent" 46 | } 47 | return s 48 | } 49 | 50 | // Partial true if the some nodes of the cluster didn't answer 51 | func (e ClusterInfosError) Partial() bool { 52 | return e.partial 53 | } 54 | 55 | // Inconsistent true if the nodes do not agree with each other 56 | func (e ClusterInfosError) Inconsistent() bool { 57 | return e.inconsistent 58 | } 59 | 60 | // IsPartialError returns true if the error is due to partial data recovery 61 | func IsPartialError(err error) bool { 62 | e, ok := err.(ClusterInfosError) 63 | return ok && e.Partial() 64 | } 65 | 66 | // IsInconsistentError eturns true if the error is due to cluster 
inconsistencies 67 | func IsInconsistentError(err error) bool { 68 | e, ok := err.(ClusterInfosError) 69 | return ok && e.Inconsistent() 70 | } 71 | -------------------------------------------------------------------------------- /pkg/redisutil/node_test.go: -------------------------------------------------------------------------------- 1 | package redisutil 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestNodes_SortByFunc(t *testing.T) { 9 | n1 := Node{ 10 | ID: "n1", 11 | IP: "10.1.1.1", 12 | Port: "", 13 | Role: "master", 14 | balance: 1365, 15 | } 16 | n2 := Node{ 17 | ID: "n2", 18 | IP: "10.1.1.2", 19 | Port: "", 20 | Role: "master", 21 | balance: 1366, 22 | } 23 | n3 := Node{ 24 | ID: "n3", 25 | IP: "10.1.1.3", 26 | Port: "", 27 | Role: "master", 28 | balance: 1365, 29 | } 30 | n4 := Node{ 31 | ID: "n4", 32 | IP: "10.1.1.4", 33 | Port: "", 34 | Role: "master", 35 | balance: -4096, 36 | } 37 | type args struct { 38 | less func(*Node, *Node) bool 39 | } 40 | tests := []struct { 41 | name string 42 | n Nodes 43 | args args 44 | want Nodes 45 | }{ 46 | { 47 | name: "asc by balance", 48 | n: Nodes{&n1, &n2, &n3, &n4}, 49 | args: args{less: func(a, b *Node) bool { return a.Balance() < b.Balance() }}, 50 | want: Nodes{&n4, &n1, &n3, &n2}, 51 | }, 52 | } 53 | for _, tt := range tests { 54 | t.Run(tt.name, func(t *testing.T) { 55 | if got := tt.n.SortByFunc(tt.args.less); !reflect.DeepEqual(got, tt.want) { 56 | t.Errorf("SortByFunc() = %v, want %v", got, tt.want) 57 | } 58 | }) 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /pkg/redisutil/slot_test.go: -------------------------------------------------------------------------------- 1 | package redisutil 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | ) 7 | 8 | func TestRemoveSlots(t *testing.T) { 9 | type args struct { 10 | slots []Slot 11 | removedSlots []Slot 12 | } 13 | tests := []struct { 14 | name string 15 | args args 16 | want []Slot 17 | 
}{ 18 | { 19 | name: "1", 20 | args: args{ 21 | slots: []Slot{2, 3, 4, 5, 6, 7, 8, 9, 10}, 22 | removedSlots: []Slot{2, 10}, 23 | }, 24 | want: []Slot{3, 4, 5, 6, 7, 8, 9}, 25 | }, 26 | { 27 | name: "2", 28 | args: args{ 29 | slots: []Slot{2, 5}, 30 | removedSlots: []Slot{2, 2, 3}, 31 | }, 32 | want: []Slot{5}, 33 | }, 34 | { 35 | name: "3", 36 | args: args{ 37 | slots: []Slot{0, 1, 3, 4}, 38 | removedSlots: []Slot{0, 1, 3, 4}, 39 | }, 40 | want: []Slot{}, 41 | }, 42 | { 43 | name: "4", 44 | args: args{ 45 | slots: []Slot{}, 46 | removedSlots: []Slot{2, 10}, 47 | }, 48 | want: []Slot{}, 49 | }, 50 | { 51 | name: "5", 52 | args: args{ 53 | slots: []Slot{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 54 | removedSlots: []Slot{5}, 55 | }, 56 | want: []Slot{0, 1, 2, 3, 4, 6, 7, 8, 9, 10}, 57 | }, 58 | } 59 | for _, tt := range tests { 60 | t.Run(tt.name, func(t *testing.T) { 61 | if got := RemoveSlots(tt.args.slots, tt.args.removedSlots); !reflect.DeepEqual(got, tt.want) { 62 | t.Errorf("RemoveSlots() = %v, want %v", got, tt.want) 63 | } 64 | }) 65 | } 66 | } 67 | 68 | func TestRemoveSlot(t *testing.T) { 69 | type args struct { 70 | slots []Slot 71 | removedSlot Slot 72 | } 73 | tests := []struct { 74 | name string 75 | args args 76 | want []Slot 77 | }{ 78 | { 79 | name: "1", 80 | args: args{ 81 | slots: []Slot{2, 3, 4, 5, 6, 7, 8, 9, 10}, 82 | removedSlot: 2, 83 | }, 84 | want: []Slot{3, 4, 5, 6, 7, 8, 9, 10}, 85 | }, 86 | { 87 | name: "2", 88 | args: args{ 89 | slots: []Slot{2, 5}, 90 | removedSlot: 2, 91 | }, 92 | want: []Slot{5}, 93 | }, 94 | { 95 | name: "3", 96 | args: args{ 97 | slots: []Slot{0, 1, 3, 4}, 98 | removedSlot: 3, 99 | }, 100 | want: []Slot{0, 1, 4}, 101 | }, 102 | { 103 | name: "4", 104 | args: args{ 105 | slots: []Slot{}, 106 | removedSlot: 2, 107 | }, 108 | want: []Slot{}, 109 | }, 110 | { 111 | name: "5", 112 | args: args{ 113 | slots: []Slot{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 114 | removedSlot: 5, 115 | }, 116 | want: []Slot{0, 1, 2, 3, 4, 6, 7, 8, 9, 10}, 
117 | }, 118 | } 119 | for _, tt := range tests { 120 | t.Run(tt.name, func(t *testing.T) { 121 | if got := RemoveSlot(tt.args.slots, tt.args.removedSlot); !reflect.DeepEqual(got, tt.want) { 122 | t.Errorf("RemoveSlot() = %v, want %v", got, tt.want) 123 | } 124 | }) 125 | } 126 | } 127 | -------------------------------------------------------------------------------- /pkg/resources/configmaps/configmap.go: -------------------------------------------------------------------------------- 1 | package configmaps 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "sort" 7 | "strconv" 8 | 9 | corev1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | 12 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 13 | ) 14 | 15 | const ( 16 | RestoreSucceeded = "succeeded" 17 | 18 | RedisConfKey = "redis.conf" 19 | ) 20 | 21 | // NewConfigMapForCR creates a new ConfigMap for the given Cluster 22 | func NewConfigMapForCR(cluster *redisv1alpha1.DistributedRedisCluster, labels map[string]string) *corev1.ConfigMap { 23 | // Do CLUSTER FAILOVER when master down 24 | shutdownContent := `#!/bin/sh 25 | CLUSTER_CONFIG="/data/nodes.conf" 26 | failover() { 27 | echo "Do CLUSTER FAILOVER" 28 | masterID=$(cat ${CLUSTER_CONFIG} | grep "myself" | awk '{print $1}') 29 | echo "Master: ${masterID}" 30 | slave=$(cat ${CLUSTER_CONFIG} | grep ${masterID} | grep "slave" | awk 'NR==1{print $2}' | sed 's/:6379@16379//') 31 | echo "Slave: ${slave}" 32 | password=$(cat /data/redis_password) 33 | if [[ -z "${password}" ]]; then 34 | redis-cli -h ${slave} CLUSTER FAILOVER 35 | else 36 | redis-cli -h ${slave} -a "${password}" CLUSTER FAILOVER 37 | fi 38 | echo "Wait for MASTER <-> SLAVE syncFinished" 39 | sleep 20 40 | } 41 | if [ -f ${CLUSTER_CONFIG} ]; then 42 | cat ${CLUSTER_CONFIG} | grep "myself" | grep "master" && \ 43 | failover 44 | fi` 45 | 46 | // Fixed Nodes.conf does not update IP address of a node when IP changes after restart, 47 | // see more 
https://github.com/antirez/redis/issues/4645. 48 | fixIPContent := `#!/bin/sh 49 | CLUSTER_CONFIG="/data/nodes.conf" 50 | if [ -f ${CLUSTER_CONFIG} ]; then 51 | if [ -z "${POD_IP}" ]; then 52 | echo "Unable to determine Pod IP address!" 53 | exit 1 54 | fi 55 | echo "Updating my IP to ${POD_IP} in ${CLUSTER_CONFIG}" 56 | sed -i.bak -e "/myself/ s/ .*:6379@16379/ ${POD_IP}:6379@16379/" ${CLUSTER_CONFIG} 57 | fi 58 | exec "$@"` 59 | 60 | redisConfContent := generateRedisConfContent(cluster.Spec.Config) 61 | 62 | return &corev1.ConfigMap{ 63 | ObjectMeta: metav1.ObjectMeta{ 64 | Name: RedisConfigMapName(cluster.Name), 65 | Namespace: cluster.Namespace, 66 | Labels: labels, 67 | OwnerReferences: redisv1alpha1.DefaultOwnerReferences(cluster), 68 | }, 69 | Data: map[string]string{ 70 | "shutdown.sh": shutdownContent, 71 | "fix-ip.sh": fixIPContent, 72 | RedisConfKey: redisConfContent, 73 | }, 74 | } 75 | } 76 | 77 | func generateRedisConfContent(configMap map[string]string) string { 78 | if configMap == nil { 79 | return "" 80 | } 81 | 82 | var buffer bytes.Buffer 83 | 84 | keys := make([]string, 0, len(configMap)) 85 | for k := range configMap { 86 | keys = append(keys, k) 87 | } 88 | sort.Strings(keys) 89 | 90 | for _, k := range keys { 91 | v := configMap[k] 92 | if len(v) == 0 { 93 | continue 94 | } 95 | buffer.WriteString(fmt.Sprintf("%s %s", k, v)) 96 | buffer.WriteString("\n") 97 | } 98 | 99 | return buffer.String() 100 | } 101 | 102 | func RedisConfigMapName(clusterName string) string { 103 | return fmt.Sprintf("%s-%s", "redis-cluster", clusterName) 104 | } 105 | 106 | func NewConfigMapForRestore(cluster *redisv1alpha1.DistributedRedisCluster, labels map[string]string) *corev1.ConfigMap { 107 | return &corev1.ConfigMap{ 108 | ObjectMeta: metav1.ObjectMeta{ 109 | Name: RestoreConfigMapName(cluster.Name), 110 | Namespace: cluster.Namespace, 111 | Labels: labels, 112 | OwnerReferences: redisv1alpha1.DefaultOwnerReferences(cluster), 113 | }, 114 | Data: 
map[string]string{ 115 | RestoreSucceeded: strconv.Itoa(0), 116 | }, 117 | } 118 | } 119 | 120 | func RestoreConfigMapName(clusterName string) string { 121 | return fmt.Sprintf("%s-%s", "rediscluster-restore", clusterName) 122 | } 123 | -------------------------------------------------------------------------------- /pkg/resources/configmaps/configmap_test.go: -------------------------------------------------------------------------------- 1 | package configmaps 2 | 3 | import ( 4 | "testing" 5 | ) 6 | 7 | func Test_generateRedisConfContent(t *testing.T) { 8 | confMap := map[string]string{ 9 | "activerehashing": "yes", 10 | "appendfsync": "everysec", 11 | "appendonly": "yes", 12 | "auto-aof-rewrite-min-size": "67108864", 13 | "auto-aof-rewrite-percentage": "100", 14 | "cluster-node-timeout": "15000", 15 | "cluster-require-full-coverage": "yes", 16 | "hash-max-ziplist-entries": "512", 17 | "hash-max-ziplist-value": "64", 18 | "hll-sparse-max-bytes": "3000", 19 | "list-compress-depth": "0", 20 | "maxmemory": "1000000000", 21 | "maxmemory-policy": "noeviction", 22 | "maxmemory-samples": "5", 23 | "no-appendfsync-on-rewrite": "no", 24 | "notify-keyspace-events": "", 25 | "repl-backlog-size": "1048576", 26 | "repl-backlog-ttl": "3600", 27 | "set-max-intset-entries": "512", 28 | "slowlog-log-slower-than": "10000", 29 | "slowlog-max-len": "128", 30 | "stop-writes-on-bgsave-error": "yes", 31 | "tcp-keepalive": "0", 32 | "timeout": "0", 33 | "zset-max-ziplist-entries": "128", 34 | "zset-max-ziplist-value": "64", 35 | } 36 | want := `activerehashing yes 37 | appendfsync everysec 38 | appendonly yes 39 | auto-aof-rewrite-min-size 67108864 40 | auto-aof-rewrite-percentage 100 41 | cluster-node-timeout 15000 42 | cluster-require-full-coverage yes 43 | hash-max-ziplist-entries 512 44 | hash-max-ziplist-value 64 45 | hll-sparse-max-bytes 3000 46 | list-compress-depth 0 47 | maxmemory 1000000000 48 | maxmemory-policy noeviction 49 | maxmemory-samples 5 50 | 
no-appendfsync-on-rewrite no 51 | repl-backlog-size 1048576 52 | repl-backlog-ttl 3600 53 | set-max-intset-entries 512 54 | slowlog-log-slower-than 10000 55 | slowlog-max-len 128 56 | stop-writes-on-bgsave-error yes 57 | tcp-keepalive 0 58 | timeout 0 59 | zset-max-ziplist-entries 128 60 | zset-max-ziplist-value 64 61 | ` 62 | type args struct { 63 | configMap map[string]string 64 | } 65 | tests := []struct { 66 | name string 67 | args args 68 | want string 69 | }{ 70 | { 71 | name: "test", 72 | args: struct{ configMap map[string]string }{configMap: confMap}, 73 | want: want, 74 | }, 75 | } 76 | for _, tt := range tests { 77 | t.Run(tt.name, func(t *testing.T) { 78 | if got := generateRedisConfContent(tt.args.configMap); got != tt.want { 79 | t.Errorf("generateRedisConfContent()\n[%v], want\n[%v]", got, tt.want) 80 | } 81 | }) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /pkg/resources/poddisruptionbudgets/poddisruptionbudget.go: -------------------------------------------------------------------------------- 1 | package poddisruptionbudgets 2 | 3 | import ( 4 | policyv1beta1 "k8s.io/api/policy/v1beta1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "k8s.io/apimachinery/pkg/util/intstr" 7 | 8 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 9 | ) 10 | 11 | func NewPodDisruptionBudgetForCR(cluster *redisv1alpha1.DistributedRedisCluster, name string, labels map[string]string) *policyv1beta1.PodDisruptionBudget { 12 | maxUnavailable := intstr.FromInt(1) 13 | 14 | return &policyv1beta1.PodDisruptionBudget{ 15 | ObjectMeta: metav1.ObjectMeta{ 16 | Labels: labels, 17 | Name: name, 18 | Namespace: cluster.Namespace, 19 | OwnerReferences: redisv1alpha1.DefaultOwnerReferences(cluster), 20 | }, 21 | Spec: policyv1beta1.PodDisruptionBudgetSpec{ 22 | MaxUnavailable: &maxUnavailable, 23 | Selector: &metav1.LabelSelector{ 24 | MatchLabels: labels, 25 | }, 26 | }, 27 | } 28 | } 29 | 
-------------------------------------------------------------------------------- /pkg/resources/services/service.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | 7 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 8 | ) 9 | 10 | // NewHeadLessSvcForCR creates a new headless service for the given Cluster. 11 | func NewHeadLessSvcForCR(cluster *redisv1alpha1.DistributedRedisCluster, name string, labels map[string]string) *corev1.Service { 12 | clientPort := corev1.ServicePort{Name: "client", Port: 6379} 13 | gossipPort := corev1.ServicePort{Name: "gossip", Port: 16379} 14 | svc := &corev1.Service{ 15 | ObjectMeta: metav1.ObjectMeta{ 16 | Labels: labels, 17 | Name: name, 18 | Namespace: cluster.Namespace, 19 | OwnerReferences: redisv1alpha1.DefaultOwnerReferences(cluster), 20 | }, 21 | Spec: corev1.ServiceSpec{ 22 | Ports: []corev1.ServicePort{clientPort, gossipPort}, 23 | Selector: labels, 24 | ClusterIP: corev1.ClusterIPNone, 25 | }, 26 | } 27 | 28 | return svc 29 | } 30 | 31 | func NewSvcForCR(cluster *redisv1alpha1.DistributedRedisCluster, name string, labels map[string]string) *corev1.Service { 32 | var ports []corev1.ServicePort 33 | clientPort := corev1.ServicePort{Name: "client", Port: 6379} 34 | gossipPort := corev1.ServicePort{Name: "gossip", Port: 16379} 35 | if cluster.Spec.Monitor == nil { 36 | ports = append(ports, clientPort, gossipPort) 37 | } else { 38 | ports = append(ports, clientPort, gossipPort, 39 | corev1.ServicePort{Name: "prom-http", Port: cluster.Spec.Monitor.Prometheus.Port}) 40 | } 41 | 42 | svc := &corev1.Service{ 43 | ObjectMeta: metav1.ObjectMeta{ 44 | Labels: labels, 45 | Name: name, 46 | Namespace: cluster.Namespace, 47 | OwnerReferences: redisv1alpha1.DefaultOwnerReferences(cluster), 48 | }, 49 | Spec: corev1.ServiceSpec{ 50 | Ports: ports, 51 | 
Selector: labels, 52 | }, 53 | } 54 | 55 | return svc 56 | } 57 | -------------------------------------------------------------------------------- /pkg/resources/statefulsets/helper.go: -------------------------------------------------------------------------------- 1 | package statefulsets 2 | 3 | import ( 4 | "context" 5 | 6 | appsv1 "k8s.io/api/apps/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/types" 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | 11 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 12 | ) 13 | 14 | const passwordKey = "password" 15 | 16 | // IsPasswordChanged determine whether the password is changed. 17 | func IsPasswordChanged(cluster *redisv1alpha1.DistributedRedisCluster, sts *appsv1.StatefulSet) bool { 18 | if cluster.Spec.PasswordSecret != nil { 19 | envSet := sts.Spec.Template.Spec.Containers[0].Env 20 | secretName := getSecretKeyRefByKey(redisv1alpha1.PasswordENV, envSet) 21 | if secretName == "" { 22 | return true 23 | } 24 | if secretName != cluster.Spec.PasswordSecret.Name { 25 | return true 26 | } 27 | } 28 | return false 29 | } 30 | 31 | func getSecretKeyRefByKey(key string, envSet []corev1.EnvVar) string { 32 | for _, value := range envSet { 33 | if key == value.Name { 34 | if value.ValueFrom != nil && value.ValueFrom.SecretKeyRef != nil { 35 | return value.ValueFrom.SecretKeyRef.Name 36 | } 37 | } 38 | } 39 | return "" 40 | } 41 | 42 | // GetOldRedisClusterPassword return old redis cluster's password. 
43 | func GetOldRedisClusterPassword(client client.Client, sts *appsv1.StatefulSet) (string, error) { 44 | envSet := sts.Spec.Template.Spec.Containers[0].Env 45 | secretName := getSecretKeyRefByKey(redisv1alpha1.PasswordENV, envSet) 46 | if secretName == "" { 47 | return "", nil 48 | } 49 | secret := &corev1.Secret{} 50 | err := client.Get(context.TODO(), types.NamespacedName{ 51 | Name: secretName, 52 | Namespace: sts.Namespace, 53 | }, secret) 54 | if err != nil { 55 | return "", err 56 | } 57 | return string(secret.Data[passwordKey]), nil 58 | } 59 | 60 | // GetClusterPassword return current redis cluster's password. 61 | func GetClusterPassword(client client.Client, cluster *redisv1alpha1.DistributedRedisCluster) (string, error) { 62 | if cluster.Spec.PasswordSecret == nil { 63 | return "", nil 64 | } 65 | secret := &corev1.Secret{} 66 | err := client.Get(context.TODO(), types.NamespacedName{ 67 | Name: cluster.Spec.PasswordSecret.Name, 68 | Namespace: cluster.Namespace, 69 | }, secret) 70 | if err != nil { 71 | return "", err 72 | } 73 | return string(secret.Data[passwordKey]), nil 74 | } 75 | -------------------------------------------------------------------------------- /pkg/resources/statefulsets/statefulset_test.go: -------------------------------------------------------------------------------- 1 | package statefulsets 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | func Test_mergeRenameCmds(t *testing.T) { 11 | type args struct { 12 | userCmds []string 13 | systemRenameCmdMap map[string]string 14 | } 15 | tests := []struct { 16 | name string 17 | args args 18 | want []string 19 | }{ 20 | { 21 | name: "test No intersection", 22 | args: args{ 23 | userCmds: []string{ 24 | "--maxmemory 2gb", 25 | "--rename-command BGSAVE pp14qluk", 26 | "--rename-command CONFIG lni07z1p", 27 | }, 28 | systemRenameCmdMap: map[string]string{ 29 | "SAVE": "6on30p6z", 30 | "DEBUG": "8a4insyv", 31 | }, 32 | }, 33 | want: []string{ 
34 | "--maxmemory 2gb", 35 | "--rename-command BGSAVE pp14qluk", 36 | "--rename-command CONFIG lni07z1p", 37 | "--rename-command DEBUG 8a4insyv", 38 | "--rename-command SAVE 6on30p6z", 39 | }, 40 | }, 41 | { 42 | name: "test intersection", 43 | args: args{ 44 | userCmds: []string{ 45 | "--rename-command BGSAVE pp14qluk", 46 | "--rename-command CONFIG lni07z1p", 47 | }, 48 | systemRenameCmdMap: map[string]string{ 49 | "BGSAVE": "fadfgad", 50 | "SAVE": "6on30p6z", 51 | "DEBUG": "8a4insyv", 52 | }, 53 | }, 54 | want: []string{ 55 | "--rename-command CONFIG lni07z1p", 56 | "--rename-command BGSAVE fadfgad", 57 | "--rename-command DEBUG 8a4insyv", 58 | "--rename-command SAVE 6on30p6z", 59 | }, 60 | }, 61 | { 62 | name: "test complex", 63 | args: args{ 64 | userCmds: []string{ 65 | "--maxmemory 2gb", 66 | "--rename-command BGSAVE pp14qluk", 67 | "--rename-command CONFIG lni07z1p", 68 | `--rename-command FLUSHALL ""`, 69 | }, 70 | systemRenameCmdMap: map[string]string{ 71 | "BGSAVE": "fadfgad", 72 | "SAVE": "6on30p6z", 73 | "DEBUG": "8a4insyv", 74 | }, 75 | }, 76 | want: []string{ 77 | "--maxmemory 2gb", 78 | "--rename-command CONFIG lni07z1p", 79 | `--rename-command FLUSHALL ""`, 80 | "--rename-command BGSAVE fadfgad", 81 | "--rename-command DEBUG 8a4insyv", 82 | "--rename-command SAVE 6on30p6z", 83 | }, 84 | }, 85 | } 86 | for _, tt := range tests { 87 | t.Run(tt.name, func(t *testing.T) { 88 | if got := mergeRenameCmds(tt.args.userCmds, tt.args.systemRenameCmdMap); !reflect.DeepEqual(got, tt.want) { 89 | t.Errorf("mergeRenameCmds() = %v, want %v", got, tt.want) 90 | } 91 | }) 92 | } 93 | } 94 | 95 | func Test_customContainerEnv(t *testing.T) { 96 | type args struct { 97 | env []corev1.EnvVar 98 | customEnv []corev1.EnvVar 99 | } 100 | tests := []struct { 101 | name string 102 | args args 103 | want []corev1.EnvVar 104 | }{ 105 | { 106 | name: "nil all", 107 | args: args{ 108 | env: nil, 109 | customEnv: nil, 110 | }, 111 | want: nil, 112 | }, 113 | { 114 | name: "nil 
env", 115 | args: args{ 116 | env: nil, 117 | customEnv: []corev1.EnvVar{{ 118 | Name: "foo", 119 | Value: "", 120 | ValueFrom: nil, 121 | }}, 122 | }, 123 | want: []corev1.EnvVar{{ 124 | Name: "foo", 125 | Value: "", 126 | ValueFrom: nil, 127 | }}, 128 | }, 129 | { 130 | name: "nil custom env", 131 | args: args{ 132 | customEnv: nil, 133 | env: []corev1.EnvVar{{ 134 | Name: "foo", 135 | Value: "", 136 | ValueFrom: nil, 137 | }}, 138 | }, 139 | want: []corev1.EnvVar{{ 140 | Name: "foo", 141 | Value: "", 142 | ValueFrom: nil, 143 | }}, 144 | }, 145 | { 146 | name: "env for bar", 147 | args: args{ 148 | env: []corev1.EnvVar{{ 149 | Name: "foo", 150 | Value: "", 151 | ValueFrom: nil, 152 | }}, 153 | customEnv: []corev1.EnvVar{{ 154 | Name: "bar", 155 | Value: "", 156 | ValueFrom: nil, 157 | }}, 158 | }, 159 | want: []corev1.EnvVar{{ 160 | Name: "foo", 161 | Value: "", 162 | ValueFrom: nil, 163 | }, { 164 | Name: "bar", 165 | Value: "", 166 | ValueFrom: nil, 167 | }}, 168 | }, 169 | } 170 | for _, tt := range tests { 171 | t.Run(tt.name, func(t *testing.T) { 172 | if got := customContainerEnv(tt.args.env, tt.args.customEnv); !reflect.DeepEqual(got, tt.want) { 173 | t.Errorf("customContainerEnv() = %v, want %v", got, tt.want) 174 | } 175 | }) 176 | } 177 | } 178 | -------------------------------------------------------------------------------- /pkg/utils/compare.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/go-logr/logr" 7 | ) 8 | 9 | func CompareIntValue(name string, old, new *int32, reqLogger logr.Logger) bool { 10 | if old == nil && new == nil { 11 | return true 12 | } else if old == nil || new == nil { 13 | return false 14 | } else if *old != *new { 15 | reqLogger.V(4).Info(fmt.Sprintf("compare status.%s: %d - %d", name, *old, *new)) 16 | return true 17 | } 18 | 19 | return false 20 | } 21 | 22 | func CompareInt32(name string, old, new int32, reqLogger 
logr.Logger) bool { 23 | if old != new { 24 | reqLogger.V(4).Info(fmt.Sprintf("compare status.%s: %d - %d", name, old, new)) 25 | return true 26 | } 27 | 28 | return false 29 | } 30 | 31 | func CompareStringValue(name string, old, new string, reqLogger logr.Logger) bool { 32 | if old != new { 33 | reqLogger.V(4).Info(fmt.Sprintf("compare %s: %s - %s", name, old, new)) 34 | return true 35 | } 36 | 37 | return false 38 | } 39 | -------------------------------------------------------------------------------- /pkg/utils/labels.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | // MergeLabels merges all the label maps received as argument into a single new label map. 4 | func MergeLabels(allLabels ...map[string]string) map[string]string { 5 | res := map[string]string{} 6 | 7 | for _, labels := range allLabels { 8 | if labels != nil { 9 | for k, v := range labels { 10 | res[k] = v 11 | } 12 | } 13 | } 14 | return res 15 | } 16 | -------------------------------------------------------------------------------- /pkg/utils/math.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "math" 4 | 5 | func Round(num float64) int { 6 | return int(num + math.Copysign(0.5, num)) 7 | } 8 | -------------------------------------------------------------------------------- /pkg/utils/parse.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "strconv" 5 | "strings" 6 | ) 7 | 8 | func ParseRedisMemConf(p string) (string, error) { 9 | var mul int64 = 1 10 | u := strings.ToLower(p) 11 | digits := u 12 | 13 | if strings.HasSuffix(u, "k") { 14 | digits = u[:len(u)-len("k")] 15 | mul = 1000 16 | } else if strings.HasSuffix(u, "kb") { 17 | digits = u[:len(u)-len("kb")] 18 | mul = 1024 19 | } else if strings.HasSuffix(u, "m") { 20 | digits = u[:len(u)-len("m")] 21 | mul = 1000 * 1000 22 | } else if 
// ParseRedisMemConf normalizes a redis memory amount (e.g. "100mb", "2g",
// "512k", "1024") into its plain byte count rendered as a decimal string.
// Suffix handling matches redis.conf semantics: k/m/g are powers of 1000,
// kb/mb/gb are powers of 1024, b is a no-op, and suffixes are
// case-insensitive. Anything that is not an integer optionally followed by
// one of those suffixes yields an error.
func ParseRedisMemConf(p string) (string, error) {
	// Two-letter suffixes are listed before their one-letter prefixes so
	// that e.g. "kb" is never truncated as a bare "b".
	units := []struct {
		suffix string
		mul    int64
	}{
		{"kb", 1 << 10},
		{"mb", 1 << 20},
		{"gb", 1 << 30},
		{"k", 1000},
		{"m", 1000 * 1000},
		{"g", 1000 * 1000 * 1000},
		{"b", 1},
	}

	lowered := strings.ToLower(p)
	digits, mul := lowered, int64(1)
	for _, u := range units {
		if strings.HasSuffix(lowered, u.suffix) {
			digits = lowered[:len(lowered)-len(u.suffix)]
			mul = u.mul
			break
		}
	}

	val, err := strconv.ParseInt(digits, 10, 64)
	if err != nil {
		return "", err
	}
	return strconv.FormatInt(val*mul, 10), nil
}
| }, 87 | { 88 | name: "g", 89 | args: args{ 90 | p: "12g", 91 | }, 92 | want: "12000000000", 93 | wantErr: false, 94 | }, 95 | { 96 | name: "gb", 97 | args: args{ 98 | p: "12gb", 99 | }, 100 | want: "12884901888", 101 | wantErr: false, 102 | }, 103 | } 104 | for _, tt := range tests { 105 | t.Run(tt.name, func(t *testing.T) { 106 | got, err := ParseRedisMemConf(tt.args.p) 107 | if (err != nil) != tt.wantErr { 108 | t.Errorf("ParseRedisMemConf() error = %v, wantErr %v", err, tt.wantErr) 109 | return 110 | } 111 | if got != tt.want { 112 | t.Errorf("ParseRedisMemConf() got = %v, want %v", got, tt.want) 113 | } 114 | }) 115 | } 116 | } 117 | -------------------------------------------------------------------------------- /pkg/utils/rename_cmd.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "bufio" 5 | "fmt" 6 | "os" 7 | "strings" 8 | 9 | "github.com/go-logr/logr" 10 | ) 11 | 12 | // buildCommandReplaceMapping reads the config file with the command-replace lines and build a mapping of 13 | // bad lines are ignored silently 14 | func BuildCommandReplaceMapping(filePath string, log logr.Logger) map[string]string { 15 | mapping := make(map[string]string) 16 | file, err := os.Open(filePath) 17 | if err != nil { 18 | log.Error(err, fmt.Sprintf("cannot open %s", filePath)) 19 | return mapping 20 | } 21 | defer file.Close() 22 | 23 | scanner := bufio.NewScanner(file) 24 | for scanner.Scan() { 25 | elems := strings.Fields(scanner.Text()) 26 | if len(elems) == 3 && strings.ToLower(elems[0]) == "rename-command" { 27 | mapping[strings.ToUpper(elems[1])] = elems[2] 28 | } 29 | } 30 | 31 | if err := scanner.Err(); err != nil { 32 | log.Error(err, fmt.Sprintf("cannot parse %s", filePath)) 33 | return mapping 34 | } 35 | return mapping 36 | } 37 | -------------------------------------------------------------------------------- /pkg/utils/scoped.go: 
-------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | const ( 8 | // AnnotationScope annotation name for defining instance scope. Used for specifying cluster wide clusters. 9 | // A namespace-scoped operator watches and manages resources in a single namespace, whereas a cluster-scoped operator watches and manages resources cluster-wide. 10 | AnnotationScope = "redis.kun/scope" 11 | //AnnotationClusterScoped annotation value for cluster wide clusters. 12 | AnnotationClusterScoped = "cluster-scoped" 13 | ) 14 | 15 | var isClusterScoped = true 16 | 17 | func IsClusterScoped() bool { 18 | return isClusterScoped 19 | } 20 | 21 | func SetClusterScoped(namespace string) { 22 | if namespace != "" { 23 | isClusterScoped = false 24 | } 25 | } 26 | 27 | func ShoudManage(meta metav1.Object) bool { 28 | if v, ok := meta.GetAnnotations()[AnnotationScope]; ok { 29 | if IsClusterScoped() { 30 | return v == AnnotationClusterScoped 31 | } 32 | } else { 33 | if !IsClusterScoped() { 34 | return true 35 | } 36 | } 37 | return false 38 | } 39 | -------------------------------------------------------------------------------- /pkg/utils/string.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | // Stringer implement the string interface 4 | type Stringer interface { 5 | String() string 6 | } 7 | 8 | // SliceJoin concatenates the elements of a to create a single string. The separator string 9 | // sep is placed between elements in the resulting string. 10 | func SliceJoin(a []Stringer, sep string) string { 11 | switch len(a) { 12 | case 0: 13 | return "" 14 | case 1: 15 | return a[0].String() 16 | case 2: 17 | // Special case for common small values. 18 | // Remove if golang.org/issue/6714 is fixed 19 | return a[0].String() + sep + a[1].String() 20 | case 3: 21 | // Special case for common small values. 
22 | // Remove if golang.org/issue/6714 is fixed 23 | return a[0].String() + sep + a[1].String() + sep + a[2].String() 24 | } 25 | n := len(sep) * (len(a) - 1) 26 | for i := 0; i < len(a); i++ { 27 | n += len(a[i].String()) 28 | } 29 | 30 | b := make([]byte, n) 31 | bp := copy(b, a[0].String()) 32 | for _, s := range a[1:] { 33 | bp += copy(b[bp:], sep) 34 | bp += copy(b[bp:], s.String()) 35 | } 36 | return string(b) 37 | } 38 | -------------------------------------------------------------------------------- /pkg/utils/types.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | // Int32 returns the value of the int32 pointer passed in or 4 | // 0 if the pointer is nil. 5 | func Int32(v *int32) int32 { 6 | if v != nil { 7 | return *v 8 | } 9 | return 0 10 | } 11 | -------------------------------------------------------------------------------- /static/redis-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ucloud/redis-cluster-operator/d1824cf248b3d68992158c0d7994ba74b4a36a8f/static/redis-cluster.png -------------------------------------------------------------------------------- /test/e2e/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG GOLANG_VERSION=1.13.3 2 | FROM golang:${GOLANG_VERSION} 3 | 4 | ENV GOLANG_VERSION=${GOLANG_VERSION} 5 | 6 | RUN apt update && apt install -y git 7 | 8 | RUN go get -u github.com/onsi/ginkgo/ginkgo github.com/onsi/gomega/... 9 | 10 | ARG PROJECT_NAME=redis-cluster-operator 11 | ARG REPO_PATH=github.com/ucloud/$PROJECT_NAME 12 | 13 | RUN mkdir -p /go/src/${REPO_PATH} 14 | COPY . 
/go/src/${REPO_PATH} 15 | RUN chmod +x /go/src/${REPO_PATH}/hack/e2e.sh 16 | 17 | CMD /go/src/github.com/ucloud/redis-cluster-operator/hack/e2e.sh -------------------------------------------------------------------------------- /test/e2e/README.md: -------------------------------------------------------------------------------- 1 | # End to end tests for DistributedRedisCluster and RedisClusterBackup 2 | 3 | ## Run test in Kubernetes 4 | 5 | `kubectl create -f deploy/e2e.yml` 6 | 7 | ## Build and push e2e test images 8 | ` DOCKER_REGISTRY=your_registry make push-e2e` 9 | -------------------------------------------------------------------------------- /test/e2e/drc/drc_suite_test.go: -------------------------------------------------------------------------------- 1 | package drc_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 10 | "github.com/ucloud/redis-cluster-operator/test/e2e" 11 | ) 12 | 13 | var f *e2e.Framework 14 | var drc *redisv1alpha1.DistributedRedisCluster 15 | 16 | func TestDrc(t *testing.T) { 17 | RegisterFailHandler(Fail) 18 | RunSpecs(t, "Drc Suite") 19 | } 20 | 21 | var _ = BeforeSuite(func() { 22 | f = e2e.NewFramework("test") 23 | if err := f.BeforeEach(); err != nil { 24 | f.Failf("Framework BeforeEach err: %s", err.Error()) 25 | } 26 | }) 27 | 28 | var _ = AfterSuite(func() { 29 | if err := f.DeleteRedisCluster(drc); err != nil { 30 | f.Logf("deleting DistributedRedisCluster err: %s", err.Error()) 31 | } 32 | if err := f.AfterEach(); err != nil { 33 | f.Failf("Framework AfterSuite err: %s", err.Error()) 34 | } 35 | }) 36 | -------------------------------------------------------------------------------- /test/e2e/drc/drc_test.go: -------------------------------------------------------------------------------- 1 | package drc_test 2 | 3 | import ( 4 | "time" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | 9 | "github.com/ucloud/redis-cluster-operator/test/e2e" 10 | ) 11 | 12 | var ( 13 | goredis *e2e.GoRedis 14 | dbsize int64 15 | err error 16 | ) 17 | 18 | var _ = Describe("DistributedRedisCluster CRUD", func() { 19 | It("should create a DistributedRedisCluster", func() { 20 | name := e2e.RandString(8) 21 | password := e2e.RandString(8) 22 | drc = e2e.NewDistributedRedisCluster(name, f.Namespace(), e2e.Redis5_0_4, f.PasswordName(), 3, 1) 23 | Ω(f.CreateRedisClusterPassword(f.PasswordName(), password)).Should(Succeed()) 24 | Ω(f.CreateRedisCluster(drc)).Should(Succeed()) 25 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 26 | goredis = e2e.NewGoRedisClient(name, f.Namespace(), password) 27 | Expect(goredis.StuffingData(10, 300000)).NotTo(HaveOccurred()) 28 | dbsize, err = goredis.DBSize() 29 | Expect(err).NotTo(HaveOccurred()) 30 | f.Logf("%s DBSIZE: %d", name, dbsize) 31 | }) 32 | 33 | Context("when the DistributedRedisCluster is created", func() { 34 | It("should change redis config for a DistributedRedisCluster", func() { 35 | e2e.ChangeDRCRedisConfig(drc) 36 | Ω(f.UpdateRedisCluster(drc)).Should(Succeed()) 37 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 38 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 39 | }) 40 | It("should recover from accidentally deleting master pods", func() { 41 | e2e.DeleteMasterPodForDRC(drc, f.Client) 42 | Eventually(e2e.IsDRCPodBeDeleted(f, drc), "5m", "10s").ShouldNot(HaveOccurred()) 43 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 44 | goredis = e2e.NewGoRedisClient(drc.Name, f.Namespace(), goredis.Password()) 45 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 46 | }) 47 | It("should scale up a DistributedRedisCluster", func() { 48 | e2e.ScaleUPDRC(drc) 49 | 
Ω(f.UpdateRedisCluster(drc)).Should(Succeed()) 50 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 51 | goredis = e2e.NewGoRedisClient(drc.Name, f.Namespace(), goredis.Password()) 52 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 53 | }) 54 | Context("when the scale up succeeded", func() { 55 | It("should scale down a DistributedRedisCluster", func() { 56 | e2e.ScaleUPDown(drc) 57 | Ω(f.UpdateRedisCluster(drc)).Should(Succeed()) 58 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 59 | goredis = e2e.NewGoRedisClient(drc.Name, f.Namespace(), goredis.Password()) 60 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 61 | }) 62 | }) 63 | It("should reset the DistributedRedisCluster password", func() { 64 | newPassword := e2e.RandString(8) 65 | Ω(f.CreateRedisClusterPassword(f.NewPasswordName(), newPassword)).Should(Succeed()) 66 | e2e.ResetPassword(drc, f.NewPasswordName()) 67 | Ω(f.UpdateRedisCluster(drc)).Should(Succeed()) 68 | time.Sleep(5 * time.Second) 69 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 70 | goredis = e2e.NewGoRedisClient(drc.Name, f.Namespace(), newPassword) 71 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 72 | }) 73 | It("should update the DistributedRedisCluster minor version", func() { 74 | e2e.RollingUpdateDRC(drc) 75 | Ω(f.UpdateRedisCluster(drc)).Should(Succeed()) 76 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 77 | goredis = e2e.NewGoRedisClient(drc.Name, f.Namespace(), goredis.Password()) 78 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 79 | }) 80 | }) 81 | }) 82 | -------------------------------------------------------------------------------- /test/e2e/drcb/drcb_suite_test.go: 
-------------------------------------------------------------------------------- 1 | package drcb_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | redisv1alpha1 "github.com/ucloud/redis-cluster-operator/pkg/apis/redis/v1alpha1" 10 | "github.com/ucloud/redis-cluster-operator/test/e2e" 11 | ) 12 | 13 | var f *e2e.Framework 14 | var drc *redisv1alpha1.DistributedRedisCluster 15 | var rdrc *redisv1alpha1.DistributedRedisCluster 16 | var drcb *redisv1alpha1.RedisClusterBackup 17 | 18 | func TestDrcb(t *testing.T) { 19 | RegisterFailHandler(Fail) 20 | RunSpecs(t, "Drcb Suite") 21 | } 22 | 23 | var _ = BeforeSuite(func() { 24 | f = e2e.NewFramework("drcb") 25 | if err := f.BeforeEach(); err != nil { 26 | f.Failf("Framework BeforeEach err: %s", err.Error()) 27 | } 28 | }) 29 | 30 | var _ = AfterSuite(func() { 31 | if err := f.DeleteRedisCluster(rdrc); err != nil { 32 | f.Logf("deleting DistributedRedisCluster err: %s", err.Error()) 33 | } 34 | if err := f.AfterEach(); err != nil { 35 | f.Failf("Framework AfterSuite err: %s", err.Error()) 36 | } 37 | }) 38 | -------------------------------------------------------------------------------- /test/e2e/drcb/drcb_test.go: -------------------------------------------------------------------------------- 1 | package drcb_test 2 | 3 | import ( 4 | "os" 5 | "time" 6 | 7 | . "github.com/onsi/ginkgo" 8 | . 
"github.com/onsi/gomega" 9 | 10 | "github.com/ucloud/redis-cluster-operator/test/e2e" 11 | ) 12 | 13 | var ( 14 | goredis *e2e.GoRedis 15 | dbsize int64 16 | err error 17 | ) 18 | 19 | var _ = Describe("Restore DistributedRedisCluster From RedisClusterBackup", func() { 20 | It("should create a DistributedRedisCluster", func() { 21 | name := e2e.RandString(8) 22 | password := e2e.RandString(8) 23 | drc = e2e.NewDistributedRedisCluster(name, f.Namespace(), e2e.Redis5_0_4, f.PasswordName(), 3, 1) 24 | Ω(f.CreateRedisClusterPassword(f.PasswordName(), password)).Should(Succeed()) 25 | Ω(f.CreateRedisCluster(drc)).Should(Succeed()) 26 | Eventually(e2e.IsDistributedRedisClusterProperly(f, drc), "10m", "10s").ShouldNot(HaveOccurred()) 27 | goredis = e2e.NewGoRedisClient(name, f.Namespace(), password) 28 | Expect(goredis.StuffingData(10, 300000)).NotTo(HaveOccurred()) 29 | dbsize, err = goredis.DBSize() 30 | Expect(err).NotTo(HaveOccurred()) 31 | f.Logf("%s DBSIZE: %d", name, dbsize) 32 | }) 33 | 34 | Context("when the DistributedRedisCluster is created", func() { 35 | It("should create a RedisClusterBackup", func() { 36 | name := e2e.RandString(8) 37 | s3ID := os.Getenv(e2e.S3ID) 38 | s3Key := os.Getenv(e2e.S3KEY) 39 | endpoint := os.Getenv(e2e.S3ENDPOINT) 40 | bucket := os.Getenv(e2e.S3BUCKET) 41 | drcb = e2e.NewRedisClusterBackup(name, f.Namespace(), e2e.BackupImage, drc.Name, f.S3SecretName(), endpoint, bucket) 42 | Ω(f.CreateS3Secret(s3ID, s3Key)).Should(Succeed()) 43 | Ω(f.CreateRedisClusterBackup(drcb)).Should(Succeed()) 44 | Eventually(e2e.IsRedisClusterBackupProperly(f, drcb), "10m", "10s").ShouldNot(HaveOccurred()) 45 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 46 | }) 47 | Context("when the RedisClusterBackup is created", func() { 48 | It("should restore from backup", func() { 49 | Ω(f.DeleteRedisCluster(drc)).Should(Succeed()) 50 | rdrc = e2e.RestoreDRC(drc, drcb) 51 | Ω(f.CreateRedisCluster(rdrc)).Should(Succeed()) 52 | 
Eventually(e2e.IsDistributedRedisClusterProperly(f, rdrc), "10m", "10s").ShouldNot(HaveOccurred()) 53 | goredis = e2e.NewGoRedisClient(rdrc.Name, f.Namespace(), goredis.Password()) 54 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 55 | }) 56 | Context("when restore is succeeded", func() { 57 | It("should change redis config for a DistributedRedisCluster", func() { 58 | e2e.ChangeDRCRedisConfig(rdrc) 59 | Ω(f.UpdateRedisCluster(rdrc)).Should(Succeed()) 60 | Eventually(e2e.IsDistributedRedisClusterProperly(f, rdrc), "10m", "10s").ShouldNot(HaveOccurred()) 61 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 62 | }) 63 | It("should recover from accidentally deleting master pods", func() { 64 | e2e.DeleteMasterPodForDRC(rdrc, f.Client) 65 | Eventually(e2e.IsDRCPodBeDeleted(f, rdrc), "5m", "10s").ShouldNot(HaveOccurred()) 66 | Eventually(e2e.IsDistributedRedisClusterProperly(f, rdrc), "10m", "10s").ShouldNot(HaveOccurred()) 67 | goredis = e2e.NewGoRedisClient(rdrc.Name, f.Namespace(), goredis.Password()) 68 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 69 | }) 70 | It("should scale up a DistributedRedisCluster", func() { 71 | e2e.ScaleUPDRC(rdrc) 72 | Ω(f.UpdateRedisCluster(rdrc)).Should(Succeed()) 73 | Eventually(e2e.IsDistributedRedisClusterProperly(f, rdrc), "10m", "10s").ShouldNot(HaveOccurred()) 74 | goredis = e2e.NewGoRedisClient(rdrc.Name, f.Namespace(), goredis.Password()) 75 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 76 | }) 77 | Context("when the scale up succeeded", func() { 78 | It("should scale down a DistributedRedisCluster", func() { 79 | e2e.ScaleUPDown(rdrc) 80 | Ω(f.UpdateRedisCluster(rdrc)).Should(Succeed()) 81 | Eventually(e2e.IsDistributedRedisClusterProperly(f, rdrc), "10m", "10s").ShouldNot(HaveOccurred()) 82 | goredis = e2e.NewGoRedisClient(rdrc.Name, f.Namespace(), goredis.Password()) 83 | Expect(e2e.IsDBSizeConsistent(dbsize, 
goredis)).NotTo(HaveOccurred()) 84 | }) 85 | }) 86 | It("should reset the DistributedRedisCluster password", func() { 87 | newPassword := e2e.RandString(8) 88 | Ω(f.CreateRedisClusterPassword(f.NewPasswordName(), newPassword)).Should(Succeed()) 89 | e2e.ResetPassword(rdrc, f.NewPasswordName()) 90 | Ω(f.UpdateRedisCluster(rdrc)).Should(Succeed()) 91 | time.Sleep(5 * time.Second) 92 | Eventually(e2e.IsDistributedRedisClusterProperly(f, rdrc), "10m", "10s").ShouldNot(HaveOccurred()) 93 | goredis = e2e.NewGoRedisClient(rdrc.Name, f.Namespace(), newPassword) 94 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 95 | }) 96 | It("should update the DistributedRedisCluster minor version", func() { 97 | e2e.RollingUpdateDRC(rdrc) 98 | Ω(f.UpdateRedisCluster(rdrc)).Should(Succeed()) 99 | Eventually(e2e.IsDistributedRedisClusterProperly(f, rdrc), "10m", "10s").ShouldNot(HaveOccurred()) 100 | goredis = e2e.NewGoRedisClient(rdrc.Name, f.Namespace(), goredis.Password()) 101 | Expect(e2e.IsDBSizeConsistent(dbsize, goredis)).NotTo(HaveOccurred()) 102 | }) 103 | }) 104 | }) 105 | }) 106 | }) 107 | -------------------------------------------------------------------------------- /test/e2e/goredis_util.go: -------------------------------------------------------------------------------- 1 | package e2e 2 | 3 | import ( 4 | "github.com/go-redis/redis" 5 | uuid "github.com/satori/go.uuid" 6 | "golang.org/x/sync/errgroup" 7 | "time" 8 | ) 9 | 10 | const defaultTimeOut = time.Second * 2 11 | 12 | // GoRedis contains ClusterClient. 13 | type GoRedis struct { 14 | client *redis.ClusterClient 15 | password string 16 | } 17 | 18 | // NewGoRedis return a new ClusterClient. 
19 | func NewGoRedis(addr, password string) *GoRedis { 20 | return &GoRedis{ 21 | client: redis.NewClusterClient(&redis.ClusterOptions{ 22 | Addrs: []string{addr}, 23 | Password: password, 24 | //MaxRetries: 5, 25 | // 26 | //PoolSize: 3, 27 | //MinIdleConns: 1, 28 | //PoolTimeout: defaultTimeOut, 29 | //IdleTimeout: defaultTimeOut, 30 | }), 31 | password: password, 32 | } 33 | } 34 | 35 | // StuffingData filled with (round * n)'s key. 36 | func (g *GoRedis) StuffingData(round, n int) error { 37 | var group errgroup.Group 38 | for i := 0; i < round; i++ { 39 | group.Go(func() error { 40 | for j := 0; j < n; j++ { 41 | key := uuid.NewV4().String() 42 | if err := g.client.Set(key, key, 0).Err(); err != nil { 43 | return err 44 | } 45 | } 46 | return nil 47 | }) 48 | } 49 | if err := group.Wait(); err != nil { 50 | return err 51 | } 52 | return nil 53 | } 54 | 55 | // DBSize return DBsize of all master nodes. 56 | func (g *GoRedis) DBSize() (int64, error) { 57 | return g.client.DBSize().Result() 58 | } 59 | 60 | // Password return redis password. 61 | func (g *GoRedis) Password() string { 62 | return g.password 63 | } 64 | 65 | // Close closes the cluster client. 
66 | func (g *GoRedis) Close() error { 67 | return g.client.Close() 68 | } 69 | -------------------------------------------------------------------------------- /test/e2e/rename.conf: -------------------------------------------------------------------------------- 1 | rename-command CONFIG lni07z1p -------------------------------------------------------------------------------- /test/e2e/util.go: -------------------------------------------------------------------------------- 1 | package e2e 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "time" 7 | 8 | "github.com/onsi/ginkgo" 9 | ) 10 | 11 | func nowStamp() string { 12 | return time.Now().Format(time.RFC3339) 13 | } 14 | 15 | func log(level string, format string, args ...interface{}) { 16 | fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...) 17 | } 18 | 19 | // Logf logs in e2e framework 20 | func Logf(format string, args ...interface{}) { 21 | log("INFO", format, args...) 22 | } 23 | 24 | // Failf reports a failure in the current e2e 25 | func Failf(format string, args ...interface{}) { 26 | msg := fmt.Sprintf(format, args...) 27 | log("ERROR", msg) 28 | ginkgo.Fail(nowStamp()+": "+msg, 1) 29 | } 30 | 31 | // LogAndReturnErrorf logs and return an error 32 | func LogAndReturnErrorf(format string, args ...interface{}) error { 33 | Logf(format, args...) 34 | return fmt.Errorf(format, args...) 
35 | } 36 | 37 | var letters = []rune("abcdefghijklmnopqrstuvwxyz") 38 | 39 | // RandString create a random string 40 | func RandString(n int) string { 41 | b := make([]rune, n) 42 | for i := range b { 43 | b[i] = letters[rand.Intn(len(letters))] 44 | } 45 | return string(b) 46 | } 47 | 48 | func init() { 49 | rand.Seed(time.Now().UnixNano()) 50 | } 51 | -------------------------------------------------------------------------------- /test/testclient/client.go: -------------------------------------------------------------------------------- 1 | package testclient 2 | 3 | import ( 4 | "k8s.io/client-go/kubernetes/scheme" 5 | "k8s.io/client-go/rest" 6 | "sigs.k8s.io/controller-runtime/pkg/client" 7 | "sigs.k8s.io/controller-runtime/pkg/client/apiutil" 8 | 9 | "github.com/ucloud/redis-cluster-operator/pkg/apis" 10 | ) 11 | 12 | func NewClient(config *rest.Config) (client.Client, error) { 13 | scheme := scheme.Scheme 14 | err := apis.AddToScheme(scheme) 15 | 16 | mapper, err := apiutil.NewDiscoveryRESTMapper(config) 17 | if err != nil { 18 | return nil, err 19 | } 20 | options := client.Options{ 21 | Scheme: scheme, 22 | Mapper: mapper, 23 | } 24 | cli, err := client.New(config, options) 25 | if err != nil { 26 | return nil, err 27 | } 28 | return cli, nil 29 | } 30 | -------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | // Place any runtime dependencies as imports in this file. 4 | // Go modules will be forced to download and install them. 
5 | package tools 6 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var ( 4 | // Version represents the software version of the Redis Cluster Operator 5 | Version = "0.2.2" 6 | // GitSHA represents the Git commit hash in short format 7 | GitSHA = "" 8 | ) 9 | --------------------------------------------------------------------------------