├── .gitattributes ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml ├── ISSUE_TEMPLATE.md └── workflows │ ├── staleissues.yaml │ ├── helm.yml │ └── ci.yaml ├── .gitignore ├── charts ├── chart-release-config.yaml └── redisoperator │ ├── Chart.yaml │ ├── .helmignore │ ├── templates │ ├── _versions.tpl │ ├── service.yaml │ ├── private-registry.yaml │ ├── monitoring.yaml │ ├── service-account.yaml │ ├── _helpers.tpl │ └── deployment.yaml │ └── values.yaml ├── scripts ├── run.sh ├── check.sh ├── helm-tests.sh ├── build.sh └── integration-tests.sh ├── api └── redisfailover │ ├── register.go │ └── v1 │ ├── doc.go │ ├── bootstrapping.go │ ├── defaults.go │ ├── register.go │ ├── validate.go │ ├── bootstrapping_test.go │ └── validate_test.go ├── example ├── operator │ ├── serviceaccount.yaml │ ├── rolebinding.yaml │ ├── custom-startup-config.yaml │ ├── operator.yaml │ ├── roles.yaml │ └── all-redis-operator-resources.yaml └── redisfailover │ ├── minimum.yaml │ ├── bootstrapping.yaml │ ├── bootstrapping-with-port.yaml │ ├── bootstrapping-with-sentinels.yaml │ ├── security-context.yaml │ ├── custom-image.yaml │ ├── custom-renames.yaml │ ├── container-security-context.yaml │ ├── custom-port.yaml │ ├── basic.yaml │ ├── tolerations.yaml │ ├── custom-command.yaml │ ├── persistent-storage.yaml │ ├── custom-annotations.yaml │ ├── enable-exporter.yaml │ ├── persistent-storage-no-pvc-deletion.yaml │ ├── custom-config.yaml │ ├── pod-anti-affinity.yaml │ ├── control-label-propagation.yaml │ ├── custom-shutdown.yaml │ ├── node-affinity.yaml │ ├── extravolumes-mounts.yaml │ ├── sidecars.yaml │ ├── topology-spread-contraints.yaml │ └── pmem.yaml ├── manifests └── kustomize │ ├── components │ ├── rbac-full │ │ ├── serviceaccount.yaml │ │ ├── deployment.yaml │ │ ├── kustomization.yaml │ │ └── clusterrolebinding.yaml │ ├── rbac │ │ ├── kustomization.yaml │ │ └── clusterrole.yaml │ ├── resources │ │ ├── kustomization.yaml │ │ └── deployment.yaml │ ├── monitoring │ │ ├── kustomizeconfig.yaml │ │ ├── servicemonitor.yaml │ │ ├── service.yaml │ │ ├── kustomization.yaml │ │ └── deployment.yaml │ └── version │ │ └── kustomization.yaml │ ├── base │ ├── kustomization.yaml │ └── deployment.yaml │ └── overlays │ ├── full │ └── kustomization.yaml │ ├── default │ └── kustomization.yaml │ └── minimal │ └── kustomization.yaml ├── client └── k8s │ └── clientset │ └── versioned │ ├── doc.go │ ├── fake │ ├── doc.go │ ├── register.go │ └── clientset_generated.go │ ├── typed │ └── redisfailover │ │ └── v1 │ │ ├── doc.go │ │ ├── fake │ │ ├── doc.go │ │ ├── fake_redisfailover_client.go │ │ └── fake_redisfailover.go │ │ ├── generated_expansion.go │ │ ├── redisfailover_client.go │ │ └── redisfailover.go │ ├── scheme │ ├── doc.go │ └── register.go │ └── clientset.go ├── operator └── redisfailover │ ├── doc.go │ ├── service │ ├── constants_test.go │ ├── constants.go │ └── names.go │ ├── config.go │ ├── util │ ├── pod.go │ └── label.go │ ├── ensurer.go │ ├── factory.go │ ├── handler.go │ └── ensurer_test.go ├── test └── integration │ └── redisfailover │ └── doc.go ├── docker ├── app │ └── Dockerfile └── development │ └── Dockerfile ├── mocks ├── doc.go └── operator │ └── redisfailover │ ├── RedisFailover.go │ └── service │ └── RedisFailoverHeal.go ├── service └── k8s │ ├── secret.go │ ├── k8s.go │ ├── util.go │ ├── secret_test.go │ ├── redisfailover.go │ ├── pod_test.go │ ├── service_test.go │ ├── configmap_test.go │ ├── deployment_test.go │ ├── configmap.go │ ├── poddisruptionbudget.go │ ├── rbac_test.go │ ├── 
poddisruptionbudget_test.go │ ├── service.go │ ├── pod.go │ └── deployment.go ├── metrics ├── dummy.go └── metrics_test.go ├── docs ├── development.md └── logic.md ├── log └── dummy.go ├── cmd ├── utils │ ├── k8s.go │ └── flags.go └── redisoperator │ └── main.go ├── go.mod └── Makefile /.gitattributes: -------------------------------------------------------------------------------- 1 | vendor/* -diff 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @spotahome/core 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /bin 2 | .bash_history 3 | .vscode 4 | .idea/ 5 | /tmp 6 | vendor -------------------------------------------------------------------------------- /charts/chart-release-config.yaml: -------------------------------------------------------------------------------- 1 | release-name-template: Chart-{{ .Version }} 2 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Fixes # . 2 | 3 | Changes proposed on the PR: 4 | - 5 | - 6 | - -------------------------------------------------------------------------------- /scripts/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ./scripts/build.sh && ./bin/linux/redis-operator --kubeconfig=/.kube/config 4 | -------------------------------------------------------------------------------- /api/redisfailover/register.go: -------------------------------------------------------------------------------- 1 | package redisfailover 2 | 3 | const ( 4 | GroupName = "databases.spotahome.com" 5 | ) 6 | -------------------------------------------------------------------------------- /example/operator/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: redisoperator 5 | -------------------------------------------------------------------------------- /scripts/check.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -o errexit 4 | set -o nounset 5 | 6 | golangci-lint run -E goimports --timeout 3m 7 | -------------------------------------------------------------------------------- /example/redisfailover/minimum.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | -------------------------------------------------------------------------------- /manifests/kustomize/components/rbac-full/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: redis-operator 5 | -------------------------------------------------------------------------------- /manifests/kustomize/components/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1alpha1 2 | kind: Component 3 | 4 | resources: 5 | - clusterrole.yaml 6 | 
-------------------------------------------------------------------------------- /api/redisfailover/v1/doc.go: -------------------------------------------------------------------------------- 1 | // +k8s:deepcopy-gen=package 2 | 3 | // Package v1 is the v1 version of the API. 4 | // +groupName=databases.spotahome.com 5 | package v1 6 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/doc.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | // This package has the automatically generated clientset. 4 | package versioned 5 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/fake/doc.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | // This package has the automatically generated fake clientset. 4 | package fake 5 | -------------------------------------------------------------------------------- /manifests/kustomize/components/resources/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1alpha1 2 | kind: Component 3 | 4 | patchesStrategicMerge: 5 | - deployment.yaml 6 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/typed/redisfailover/v1/doc.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | // This package has the automatically generated typed clients. 4 | package v1 5 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/typed/redisfailover/v1/fake/doc.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | // Package fake has the automatically generated clients. 4 | package fake 5 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/typed/redisfailover/v1/generated_expansion.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | package v1 4 | 5 | type RedisFailoverExpansion interface{} 6 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/scheme/doc.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | // This package contains the scheme of the automatically generated clientset. 
4 | package scheme 5 | -------------------------------------------------------------------------------- /manifests/kustomize/components/monitoring/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | commonLabels: 2 | - group: monitoring.coreos.com 3 | version: v1 4 | kind: ServiceMonitor 5 | path: spec/selector/matchLabels 6 | create: true 7 | -------------------------------------------------------------------------------- /manifests/kustomize/components/rbac-full/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-operator 5 | spec: 6 | template: 7 | spec: 8 | serviceAccountName: redis-operator 9 | -------------------------------------------------------------------------------- /scripts/helm-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | chart=charts/redisoperator 6 | 7 | echo ">> Testing chart ${chart}" 8 | 9 | helm lint ${chart} 10 | helm template ${chart} 11 | 12 | echo "> Chart OK" 13 | -------------------------------------------------------------------------------- /operator/redisfailover/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | The Redis operator handles RedisFailover (RF) resources: for each RF 3 | it creates a Redis failover setup, containing a group of Redis servers and a group 4 | of Sentinels managing them. 5 | */ 6 | 7 | package redisfailover 8 | -------------------------------------------------------------------------------- /manifests/kustomize/components/monitoring/servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: redis-operator 5 | spec: 6 | endpoints: 7 | - port: metrics 8 | interval: 15s 9 | -------------------------------------------------------------------------------- /test/integration/redisfailover/doc.go: -------------------------------------------------------------------------------- 1 | package redisfailover 2 | 3 | // These tests are intended to check if the Redis Operator is working as expected. 
4 | // To be able to run these tests, it is mandatory to have access to a Kubernetes 5 | // cluster. 6 | -------------------------------------------------------------------------------- /manifests/kustomize/components/monitoring/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis-operator 5 | spec: 6 | type: ClusterIP 7 | ports: 8 | - name: metrics 9 | port: 9710 10 | protocol: TCP 11 | -------------------------------------------------------------------------------- /example/redisfailover/bootstrapping.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | bootstrapNode: 7 | host: "127.0.0.1" 8 | sentinel: 9 | replicas: 3 10 | redis: 11 | replicas: 3 12 | -------------------------------------------------------------------------------- /manifests/kustomize/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: redis-operator 6 | 7 | resources: 8 | - databases.spotahome.com_redisfailovers.yaml 9 | - deployment.yaml 10 | -------------------------------------------------------------------------------- /operator/redisfailover/service/constants_test.go: -------------------------------------------------------------------------------- 1 | package service_test 2 | 3 | const ( 4 | name = "test" 5 | namespace = "testns" 6 | sentinelName = "rfs-test" 7 | redisName = "rfr-test" 8 | masterName = "rfrm-test" 9 | slaveName = "rfrs-test" 10 | ) 11 | -------------------------------------------------------------------------------- /manifests/kustomize/components/version/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1alpha1 2 | kind: Component 3 | 4 | labels: 5 | - pairs: 6 | app.kubernetes.io/version: 1.3.0 7 | 8 | images: 9 | - name: quay.io/spotahome/redis-operator 10 | newTag: v1.3.0 11 | -------------------------------------------------------------------------------- /operator/redisfailover/config.go: -------------------------------------------------------------------------------- 1 | package redisfailover 2 | 3 | // Config is the configuration for the redis operator. 
4 | type Config struct { 5 | ListenAddress string 6 | MetricsPath string 7 | Concurrency int 8 | SupportedNamespacesRegex string 9 | } 10 | -------------------------------------------------------------------------------- /example/redisfailover/bootstrapping-with-port.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | bootstrapNode: 7 | host: "127.0.0.1" 8 | port: 6388 9 | sentinel: 10 | replicas: 3 11 | redis: 12 | replicas: 3 13 | -------------------------------------------------------------------------------- /manifests/kustomize/components/rbac-full/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1alpha1 2 | kind: Component 3 | 4 | components: 5 | - ../rbac/ 6 | 7 | resources: 8 | - clusterrolebinding.yaml 9 | - serviceaccount.yaml 10 | 11 | patchesStrategicMerge: 12 | - deployment.yaml 13 | -------------------------------------------------------------------------------- /manifests/kustomize/components/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1alpha1 2 | kind: Component 3 | 4 | resources: 5 | - service.yaml 6 | - servicemonitor.yaml 7 | 8 | patchesStrategicMerge: 9 | - deployment.yaml 10 | 11 | configurations: 12 | - kustomizeconfig.yaml 13 | -------------------------------------------------------------------------------- /example/redisfailover/bootstrapping-with-sentinels.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | bootstrapNode: 7 | host: "127.0.0.1" 8 | allowSentinels: true 9 | sentinel: 10 | replicas: 3 11 | redis: 12 | replicas: 3 13 | -------------------------------------------------------------------------------- /example/redisfailover/security-context.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | redis: 9 | replicas: 3 10 | securityContext: 11 | runAsUser: 1000 12 | runAsGroup: 1000 13 | fsGroup: 1000 14 | -------------------------------------------------------------------------------- /example/operator/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: redisoperator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: redisoperator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: redisoperator 12 | namespace: default 13 | -------------------------------------------------------------------------------- /manifests/kustomize/components/rbac-full/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: redis-operator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: redis-operator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: redis-operator 12 | 
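# The RBAC and deployment snippets above are kustomize components; they are not applied on their own but are pulled in by the
# overlays under manifests/kustomize/overlays/ (minimal, default, full). A usage sketch, assuming a kubectl recent enough to
# ship with built-in kustomize:
#   kubectl kustomize manifests/kustomize/overlays/default   # render the composed manifests
#   kubectl apply -k manifests/kustomize/overlays/default    # apply them to the cluster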
-------------------------------------------------------------------------------- /manifests/kustomize/components/monitoring/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-operator 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: redis-operator 10 | ports: 11 | - name: metrics 12 | containerPort: 9710 13 | protocol: TCP 14 | -------------------------------------------------------------------------------- /operator/redisfailover/util/pod.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import v1 "k8s.io/api/core/v1" 4 | 5 | func PodIsTerminal(pod *v1.Pod) bool { 6 | return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded 7 | } 8 | 9 | func PodIsScheduling(pod *v1.Pod) bool { 10 | return pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodPending 11 | } 12 | -------------------------------------------------------------------------------- /example/redisfailover/custom-image.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | image: redis:4.0-alpine 9 | imagePullPolicy: IfNotPresent 10 | redis: 11 | replicas: 3 12 | image: redis:4.0-alpine 13 | imagePullPolicy: IfNotPresent 14 | -------------------------------------------------------------------------------- /manifests/kustomize/overlays/full/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: redis-operator 6 | app.kubernetes.io/instance: redis-operator 7 | 8 | components: 9 | - ../../components/monitoring/ 10 | - ../../components/version/ 11 | 12 | resources: 13 | - ../default/ 14 | -------------------------------------------------------------------------------- /manifests/kustomize/overlays/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: redis-operator 6 | app.kubernetes.io/instance: redis-operator 7 | 8 | components: 9 | - ../../components/resources/ 10 | - ../../components/version/ 11 | 12 | resources: 13 | - ../minimal/ 14 | -------------------------------------------------------------------------------- /manifests/kustomize/overlays/minimal/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: redis-operator 6 | app.kubernetes.io/instance: redis-operator 7 | 8 | components: 9 | - ../../components/rbac-full/ 10 | - ../../components/version/ 11 | 12 | resources: 13 | - ../../base/ 14 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "gomod" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | ignore: 8 | # Ignore Kubernetes dependencies to have full control on them. 
9 | - dependency-name: "k8s.io/*" 10 | - package-ecosystem: "github-actions" 11 | directory: "/" 12 | schedule: 13 | interval: "daily" 14 | -------------------------------------------------------------------------------- /example/redisfailover/custom-renames.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | redis: 9 | replicas: 3 10 | customCommandRenames: 11 | - from: "monitor" 12 | to: "" 13 | - from: "flushall" 14 | to: "fa" 15 | - from: flushdb 16 | to: xxfd 17 | -------------------------------------------------------------------------------- /example/redisfailover/container-security-context.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | redis: 9 | replicas: 3 10 | securityContext: 11 | runAsUser: 1000 12 | runAsGroup: 1000 13 | fsGroup: 1000 14 | containerSecurityContext: 15 | readOnlyRootFilesystem: false 16 | -------------------------------------------------------------------------------- /manifests/kustomize/components/resources/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: redis-operator 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: redis-operator 10 | resources: 11 | limits: 12 | # cpu: 100m 13 | memory: 50Mi 14 | requests: 15 | cpu: 10m 16 | memory: 50Mi 17 | -------------------------------------------------------------------------------- /example/redisfailover/custom-port.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: ports 6 | --- 7 | apiVersion: databases.spotahome.com/v1 8 | kind: RedisFailover 9 | metadata: 10 | name: redis-ports 11 | namespace: ports 12 | spec: 13 | redis: 14 | exporter: 15 | enabled: true 16 | port: 12345 17 | replicas: 3 18 | sentinel: 19 | exporter: 20 | enabled: true 21 | replicas: 3 22 | -------------------------------------------------------------------------------- /charts/redisoperator/Chart.yaml: -------------------------------------------------------------------------------- 1 | annotations: 2 | category: Redis Operator 3 | appVersion: 1.3.0 4 | apiVersion: v1 5 | description: A Helm chart for the Spotahome Redis Operator 6 | name: redis-operator 7 | version: 3.3.0 8 | home: https://github.com/spotahome/redis-operator 9 | keywords: 10 | - "golang" 11 | - "operator" 12 | - "cluster" 13 | sources: 14 | - https://github.com/spotahome/redis-operator 15 | kubeVersion: ">=1.21.0-0" 16 | -------------------------------------------------------------------------------- /charts/redisoperator/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /example/redisfailover/basic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | resources: 9 | requests: 10 | cpu: 100m 11 | limits: 12 | memory: 100Mi 13 | redis: 14 | replicas: 3 15 | resources: 16 | requests: 17 | cpu: 100m 18 | memory: 100Mi 19 | limits: 20 | cpu: 400m 21 | memory: 500Mi 22 | -------------------------------------------------------------------------------- /example/redisfailover/tolerations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redis 5 | spec: 6 | redis: 7 | replicas: 3 8 | tolerations: 9 | - effect: NoExecute 10 | key: dedicated 11 | operator: Equal 12 | value: production 13 | sentinel: 14 | replicas: 3 15 | tolerations: 16 | - effect: NoExecute 17 | key: dedicated 18 | operator: Equal 19 | value: production 20 | -------------------------------------------------------------------------------- /example/redisfailover/custom-command.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | redis: 7 | replicas: 3 8 | command: 9 | - "redis-server" 10 | - "/redis/redis.conf" 11 | - "--protected-mode" 12 | - "no" 13 | sentinel: 14 | replicas: 3 15 | command: 16 | - "redis-server" 17 | - "/redis/sentinel.conf" 18 | - "--sentinel" 19 | - "--protected-mode" 20 | - "no" 21 | -------------------------------------------------------------------------------- /example/redisfailover/persistent-storage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover-persistent 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | redis: 9 | replicas: 3 10 | storage: 11 | persistentVolumeClaim: 12 | metadata: 13 | name: redisfailover-persistent-data 14 | spec: 15 | accessModes: 16 | - ReadWriteOnce 17 | resources: 18 | requests: 19 | storage: 1Gi 20 | -------------------------------------------------------------------------------- /example/redisfailover/custom-annotations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | podAnnotations: 9 | imageregistry: "https://hub.docker.com/" 10 | serviceAnnotations: 11 | imageregistry: "https://hub.docker.com/" 12 | redis: 13 | replicas: 3 14 | podAnnotations: 15 | imageregistry: "https://hub.docker.com/" 16 | serviceAnnotations: 17 | imageregistry: "https://hub.docker.com/" 18 | -------------------------------------------------------------------------------- /manifests/kustomize/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: 
redis-operator 5 | spec: 6 | replicas: 1 7 | strategy: 8 | type: RollingUpdate 9 | template: 10 | spec: 11 | containers: 12 | - name: redis-operator 13 | image: quay.io/spotahome/redis-operator:latest 14 | imagePullPolicy: IfNotPresent 15 | securityContext: 16 | readOnlyRootFilesystem: true 17 | runAsNonRoot: true 18 | runAsUser: 1000 19 | restartPolicy: Always 20 | -------------------------------------------------------------------------------- /api/redisfailover/v1/bootstrapping.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | // Bootstrapping returns true when a BootstrapNode is provided to the RedisFailover spec. Otherwise, it returns false. 4 | func (r *RedisFailover) Bootstrapping() bool { 5 | return r.Spec.BootstrapNode != nil 6 | } 7 | 8 | // SentinelsAllowed returns true if not Bootstrapping or if the BootstrapNode settings allow sentinels to exist 9 | func (r *RedisFailover) SentinelsAllowed() bool { 10 | bootstrapping := r.Bootstrapping() 11 | return !bootstrapping || (bootstrapping && r.Spec.BootstrapNode.AllowSentinels) 12 | } 13 | -------------------------------------------------------------------------------- /example/redisfailover/enable-exporter.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | exporter: 9 | enabled: true 10 | image: leominov/redis_sentinel_exporter:1.3.0 11 | redis: 12 | replicas: 3 13 | exporter: 14 | enabled: true 15 | image: oliver006/redis_exporter:v1.3.5-alpine 16 | args: 17 | - --web.telemetry-path 18 | - /metrics 19 | env: 20 | - name: REDIS_EXPORTER_LOG_FORMAT 21 | value: txt 22 | -------------------------------------------------------------------------------- /example/redisfailover/persistent-storage-no-pvc-deletion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover-persistent-keep 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | redis: 9 | replicas: 3 10 | storage: 11 | keepAfterDeletion: true 12 | persistentVolumeClaim: 13 | metadata: 14 | name: redisfailover-persistent-keep-data 15 | spec: 16 | accessModes: 17 | - ReadWriteOnce 18 | resources: 19 | requests: 20 | storage: 1Gi 21 | -------------------------------------------------------------------------------- /docker/app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM golang:1.20-alpine AS build 2 | RUN apk --no-cache add \ 3 | bash 4 | 5 | WORKDIR /src 6 | COPY . . 7 | 8 | ARG TARGETOS 9 | ARG TARGETARCH 10 | ARG VERSION 11 | RUN GOOS=$TARGETOS GOARCH=$TARGETARCH VERSION=$VERSION ./scripts/build.sh 12 | 13 | FROM alpine:latest 14 | RUN apk --no-cache add \ 15 | ca-certificates 16 | COPY --from=build /src/bin/redis-operator /usr/local/bin 17 | RUN addgroup -g 1000 rf && \ 18 | adduser -D -u 1000 -G rf rf && \ 19 | chown rf:rf /usr/local/bin/redis-operator 20 | USER rf 21 | 22 | ENTRYPOINT ["/usr/local/bin/redis-operator"] 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### Expected behaviour 2 | 3 | What do you want to achieve? 4 | 5 | ### Actual behaviour 6 | 7 | What is happening? 
Are all the pieces created? Can you access the service? 8 | 9 | ### Steps to reproduce the behaviour 10 | 11 | Describe step by step what you have done to get to this point 12 | 13 | ### Environment 14 | 15 | How are the pieces configured? 16 | * Redis Operator version 17 | * Kubernetes version 18 | * Kubernetes configuration used (e.g.: Is RBAC active?) 19 | 20 | ### Logs 21 | 22 | Please add the debugging logs. In order to be able to gather them, add the `-debug` flag when running the operator. -------------------------------------------------------------------------------- /example/redisfailover/custom-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | customConfig: 9 | - "down-after-milliseconds 2000" 10 | - "failover-timeout 3000" 11 | redis: 12 | replicas: 3 13 | customConfig: 14 | - "maxclients 100" 15 | - "hz 50" 16 | - "timeout 60" 17 | - "tcp-keepalive 60" 18 | - "client-output-buffer-limit normal 0 0 0" 19 | - "client-output-buffer-limit slave 1000000000 1000000000 0" 20 | - "client-output-buffer-limit pubsub 33554432 8388608 60" 21 | -------------------------------------------------------------------------------- /example/redisfailover/pod-anti-affinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | redis: 9 | replicas: 3 10 | affinity: 11 | podAntiAffinity: 12 | preferredDuringSchedulingIgnoredDuringExecution: 13 | - weight: 100 14 | podAffinityTerm: 15 | labelSelector: 16 | matchExpressions: 17 | - key: security 18 | operator: In 19 | values: 20 | - us-west-1 21 | topologyKey: failure-domain.beta.kubernetes.io/zone 22 | -------------------------------------------------------------------------------- /scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | 6 | src=./cmd/redisoperator 7 | out=./bin/redis-operator 8 | 9 | if [[ -n "${TARGETOS:-}" ]] && [[ -n "${TARGETARCH:-}" ]]; 10 | then 11 | echo "Building ${TARGETOS}/${TARGETARCH} release..." 12 | export GOOS=${TARGETOS} 13 | export GOARCH=${TARGETARCH} 14 | binary_ext=-${TARGETOS}-${TARGETARCH} 15 | else 16 | echo "Building native release..."; binary_ext="" # empty suffix so 'set -o nounset' does not abort below 
17 | fi 18 | 19 | final_out=${out}${binary_ext} 20 | ldf_cmp="-w -extldflags '-static'" 21 | f_ver="-X main.Version=${VERSION:-dev}" 22 | 23 | echo "Building binary at ${out}" 24 | CGO_ENABLED=0 go build -o ${out} --ldflags "${ldf_cmp} ${f_ver}" ${src} 25 | -------------------------------------------------------------------------------- /api/redisfailover/v1/defaults.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | const ( 4 | defaultRedisNumber = 3 5 | defaultSentinelNumber = 3 6 | defaultSentinelExporterImage = "quay.io/oliver006/redis_exporter:v1.43.0" 7 | defaultExporterImage = "quay.io/oliver006/redis_exporter:v1.43.0" 8 | defaultImage = "redis:6.2.6-alpine" 9 | defaultRedisPort = 6379 10 | ) 11 | 12 | var ( 13 | defaultSentinelCustomConfig = []string{ 14 | "down-after-milliseconds 5000", 15 | "failover-timeout 10000", 16 | } 17 | defaultRedisCustomConfig = []string{ 18 | "replica-priority 100", 19 | } 20 | bootstrappingRedisCustomConfig = []string{ 21 | "replica-priority 0", 22 | } 23 | ) 24 | -------------------------------------------------------------------------------- /charts/redisoperator/templates/_versions.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Return the appropriate apiVersion for deployment. 4 | */}} 5 | {{- define "common.capabilities.deployment.apiVersion" -}} 6 | {{- print "apps/v1" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Return the appropriate apiVersion for ingress. 11 | */}} 12 | {{- define "common.capabilities.ingress.apiVersion" -}} 13 | {{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} 14 | {{- print "networking.k8s.io/v1" -}} 15 | {{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} 16 | {{- print "extensions/v1beta1" -}} 17 | {{- else -}} 18 | {{- print "networking.k8s.io/v1beta1" -}} 19 | {{- end -}} 20 | {{- end -}} 21 | -------------------------------------------------------------------------------- /charts/redisoperator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "chart.fullname" . -}} 2 | {{- $svcPort := .Values.service.port -}} 3 | {{- $data := dict "Chart" .Chart "Release" .Release "Values" .Values -}} 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: {{ $fullName }} 8 | namespace: {{ include "chart.namespaceName" . }} 9 | labels: 10 | {{- include "chart.labels" $data | nindent 4 }} 11 | {{- if .Values.annotations }} 12 | annotations: 13 | {{ toYaml .Values.annotations | indent 4 }} 14 | {{- end }} 15 | spec: 16 | type: {{ .Values.service.type}} 17 | ports: 18 | - name: metrics 19 | port: {{ $svcPort }} 20 | protocol: TCP 21 | selector: 22 | {{- include "chart.selectorLabels" $data | nindent 4 }} 23 | 24 | -------------------------------------------------------------------------------- /charts/redisoperator/templates/private-registry.yaml: -------------------------------------------------------------------------------- 1 | {{- if (and .Values.imageCredentials.create (not .Values.imageCredentials.existsSecrets)) -}} 2 | {{- $fullName := include "chart.fullname" . -}} 3 | {{- $name := "registry" -}} 4 | {{- $data := dict "name" $name "Chart" .Chart "Release" .Release "Values" .Values -}} 5 | apiVersion: v1 6 | kind: Secret 7 | metadata: 8 | name: {{ $fullName }}-{{ $name }} 9 | namespace: {{ include "chart.namespaceName" . 
}} 10 | labels: 11 | {{- include "chart.labels" $data | nindent 4 }} 12 | {{- if .Values.annotations }} 13 | annotations: 14 | {{ toYaml .Values.annotations | indent 4 }} 15 | {{- end }} 16 | type: kubernetes.io/dockerconfigjson 17 | data: 18 | .dockerconfigjson: {{ template "imagePullSecret" . }} 19 | {{- end }} -------------------------------------------------------------------------------- /operator/redisfailover/util/label.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // MergeLabels merges all the label maps received as argument into a single new label map. 4 | func MergeLabels(allLabels ...map[string]string) map[string]string { 5 | res := map[string]string{} 6 | 7 | for _, labels := range allLabels { 8 | for k, v := range labels { 9 | res[k] = v 10 | } 11 | } 12 | return res 13 | } 14 | 15 | // MergeAnnotations merges all the annotations maps received as argument into a single new annotations map. 16 | func MergeAnnotations(allMergeAnnotations ...map[string]string) map[string]string { 17 | res := map[string]string{} 18 | 19 | for _, annotations := range allMergeAnnotations { 20 | for k, v := range annotations { 21 | res[k] = v 22 | } 23 | } 24 | return res 25 | } 26 | -------------------------------------------------------------------------------- /example/operator/custom-startup-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: startup-config 6 | data: 7 | startup.sh: | 8 | #!/bin/bash 9 | redis-cli -h 127.0.0.1 -p ${REDIS_PORT} --user pinger --pass pingpass --no-auth-warning ping | grep PONG 10 | --- 11 | apiVersion: databases.spotahome.com/v1 12 | kind: RedisFailover 13 | metadata: 14 | name: redisfailover 15 | spec: 16 | sentinel: 17 | replicas: 3 18 | resources: 19 | requests: 20 | cpu: 100m 21 | limits: 22 | memory: 100Mi 23 | redis: 24 | replicas: 3 25 | startupConfigMap: startup-config 26 | resources: 27 | requests: 28 | cpu: 100m 29 | memory: 200Mi 30 | limits: 31 | cpu: 400m 32 | memory: 500Mi 33 | -------------------------------------------------------------------------------- /scripts/integration-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | SUDO='' 6 | if [[ $(id -u) -ne 0 ]] 7 | then 8 | SUDO="sudo" 9 | fi 10 | 11 | function cleanup { 12 | echo "=> Removing minikube cluster" 13 | $SUDO minikube delete 14 | } 15 | trap cleanup EXIT 16 | 17 | echo "=> Preparing minikube for running integration tests" 18 | $SUDO minikube start --vm-driver=none --kubernetes-version=v1.22.3 19 | 20 | echo "=> Waiting for minikube to start" 21 | sleep 30 22 | 23 | # Hack for Travis. The kubeconfig has to be readable 24 | if [[ -v IN_TRAVIS ]] 25 | then 26 | $SUDO chown -R travis: ${HOME}/.minikube/ 27 | $SUDO chmod a+r ${HOME}/.kube/config 28 | fi 29 | 30 | echo "=> Running integration tests" 31 | go test `go list ./... | grep test/integration` -v -tags='integration' 32 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/typed/redisfailover/v1/fake/fake_redisfailover_client.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 
2 | 3 | package fake 4 | 5 | import ( 6 | v1 "github.com/spotahome/redis-operator/client/k8s/clientset/versioned/typed/redisfailover/v1" 7 | rest "k8s.io/client-go/rest" 8 | testing "k8s.io/client-go/testing" 9 | ) 10 | 11 | type FakeDatabasesV1 struct { 12 | *testing.Fake 13 | } 14 | 15 | func (c *FakeDatabasesV1) RedisFailovers(namespace string) v1.RedisFailoverInterface { 16 | return &FakeRedisFailovers{c, namespace} 17 | } 18 | 19 | // RESTClient returns a RESTClient that is used to communicate 20 | // with API server by this client implementation. 21 | func (c *FakeDatabasesV1) RESTClient() rest.Interface { 22 | var ret *rest.RESTClient 23 | return ret 24 | } 25 | -------------------------------------------------------------------------------- /example/redisfailover/control-label-propagation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover2 5 | labels: 6 | # These two labels will be propagated. 7 | app.example.com/label1: value 8 | app.example.com/label2: value 9 | # This one won't be, as there is a non-empty whitelist and the regexp doesn't match it. 10 | anotherlabel: value 11 | spec: 12 | sentinel: 13 | replicas: 3 14 | resources: 15 | requests: 16 | cpu: 100m 17 | limits: 18 | memory: 100Mi 19 | redis: 20 | replicas: 3 21 | resources: 22 | requests: 23 | cpu: 100m 24 | memory: 100Mi 25 | limits: 26 | cpu: 400m 27 | memory: 500Mi 28 | labelWhitelist: 29 | - ^app.example.com.* 30 | -------------------------------------------------------------------------------- /.github/workflows/staleissues.yaml: -------------------------------------------------------------------------------- 1 | name: Close inactive issues 2 | on: 3 | schedule: 4 | - cron: "30 1 * * *" 5 | 6 | jobs: 7 | close-issues: 8 | runs-on: ubuntu-latest 9 | permissions: 10 | issues: write 11 | pull-requests: write 12 | steps: 13 | - uses: actions/stale@v8 14 | with: 15 | days-before-issue-stale: 45 16 | days-before-issue-close: 14 17 | stale-issue-label: "stale" 18 | stale-issue-message: "This issue is stale because it has been open for 45 days with no activity." 19 | close-issue-message: "This issue was closed because it has been inactive for 14 days since being marked as stale." 
20 | days-before-pr-stale: -1 21 | days-before-pr-close: -1 22 | repo-token: ${{ secrets.GITHUB_TOKEN }} 23 | -------------------------------------------------------------------------------- /.github/workflows/helm.yml: -------------------------------------------------------------------------------- 1 | name: Release Charts 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | release: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v3 13 | with: 14 | fetch-depth: 0 15 | - name: Configure Git 16 | run: | 17 | git config user.name "$GITHUB_ACTOR" 18 | git config user.email "${GITHUB_ACTOR}@users.noreply.github.com" 19 | 20 | - name: Install Helm 21 | uses: azure/setup-helm@v3 22 | with: 23 | version: v3.7.2 24 | 25 | - name: Release 26 | uses: helm/chart-releaser-action@v1.5.0 27 | with: 28 | charts_dir: charts 29 | config: charts/chart-release-config.yaml 30 | env: 31 | CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" 32 | -------------------------------------------------------------------------------- /example/redisfailover/custom-shutdown.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redisfailover 5 | spec: 6 | sentinel: 7 | replicas: 3 8 | redis: 9 | replicas: 3 10 | shutdownConfigMap: "shutdown-configmap" 11 | 12 | --- 13 | apiVersion: v1 14 | kind: ConfigMap 15 | metadata: 16 | name: shutdown-configmap 17 | data: 18 | shutdown.sh: | 19 | echo "shutdown in progress..." 20 | master=$(redis-cli -h ${RFS_REDISFAILOVER_SERVICE_HOST} -p ${RFS_REDISFAILOVER_SERVICE_PORT_SENTINEL} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | tr -d '\"' |cut -d' ' -f1) 21 | redis-cli SAVE 22 | if [[ $master == $(hostname -i) ]]; then 23 | redis-cli -h ${RFS_REDISFAILOVER_SERVICE_HOST} -p ${RFS_REDISFAILOVER_SERVICE_PORT_SENTINEL} SENTINEL failover mymaster 24 | fi 25 | -------------------------------------------------------------------------------- /example/redisfailover/node-affinity.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databases.spotahome.com/v1 2 | kind: RedisFailover 3 | metadata: 4 | name: redis 5 | spec: 6 | redis: 7 | replicas: 3 8 | affinity: 9 | nodeAffinity: 10 | requiredDuringSchedulingIgnoredDuringExecution: 11 | nodeSelectorTerms: 12 | - matchExpressions: 13 | - key: kops.k8s.io/instancegroup 14 | operator: In 15 | values: 16 | - productionnodes 17 | sentinel: 18 | replicas: 3 19 | affinity: 20 | nodeAffinity: 21 | requiredDuringSchedulingIgnoredDuringExecution: 22 | nodeSelectorTerms: 23 | - matchExpressions: 24 | - key: kops.k8s.io/instancegroup 25 | operator: In 26 | values: 27 | - productionnodes 28 | -------------------------------------------------------------------------------- /example/redisfailover/extravolumes-mounts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: exm 6 | --- 7 | apiVersion: v1 8 | kind: Secret 9 | metadata: 10 | name: foo 11 | namespace: exm 12 | type: Opaque 13 | stringData: 14 | password: MWYyZDFlMmU2N2Rm 15 | --- 16 | apiVersion: databases.spotahome.com/v1 17 | kind: RedisFailover 18 | metadata: 19 | name: foo 20 | namespace: exm 21 | spec: 22 | sentinel: 23 | replicas: 3 24 | extraVolumes: 25 | - name: foo 26 | secret: 27 | secretName: foo 28 | optional: false 29 | extraVolumeMounts: 30 | - name: foo 31 | mountPath: 
"/etc/foo" 32 | readOnly: true 33 | redis: 34 | replicas: 3 35 | extraVolumes: 36 | - name: foo 37 | secret: 38 | secretName: foo 39 | optional: false 40 | extraVolumeMounts: 41 | - name: foo 42 | mountPath: "/etc/foo" 43 | readOnly: true -------------------------------------------------------------------------------- /example/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: redisoperator 6 | name: redisoperator 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: redisoperator 12 | strategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: redisoperator 18 | spec: 19 | serviceAccountName: redisoperator 20 | containers: 21 | - image: quay.io/spotahome/redis-operator:latest 22 | imagePullPolicy: IfNotPresent 23 | name: app 24 | securityContext: 25 | readOnlyRootFilesystem: true 26 | runAsNonRoot: true 27 | runAsUser: 1000 28 | resources: 29 | limits: 30 | cpu: 100m 31 | memory: 50Mi 32 | requests: 33 | cpu: 10m 34 | memory: 50Mi 35 | env: 36 | - name: POD_NAMESPACE 37 | valueFrom: 38 | fieldRef: 39 | apiVersion: v1 40 | fieldPath: metadata.namespace 41 | restartPolicy: Always 42 | -------------------------------------------------------------------------------- /docker/development/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.20-alpine 2 | 3 | ENV CODEGEN_VERSION="1.11.9" 4 | 5 | RUN apk --no-cache add \ 6 | bash \ 7 | git \ 8 | g++ \ 9 | openssl 10 | 11 | # Code generator stuff 12 | # Check: https://github.com/kubernetes/kubernetes/pull/57656 13 | RUN wget http://github.com/kubernetes/code-generator/archive/kubernetes-${CODEGEN_VERSION}.tar.gz && \ 14 | mkdir -p /go/src/k8s.io/code-generator/ && \ 15 | tar zxvf kubernetes-${CODEGEN_VERSION}.tar.gz --strip 1 -C /go/src/k8s.io/code-generator/ && \ 16 | mkdir -p /go/src/k8s.io/kubernetes/hack/boilerplate/ && \ 17 | touch /go/src/k8s.io/kubernetes/hack/boilerplate/boilerplate.go.txt 18 | 19 | # Mock creator 20 | ARG MOCKERY_VERSION="2.9.6" 21 | RUN wget -c https://github.com/vektra/mockery/releases/download/v${MOCKERY_VERSION}/mockery_${MOCKERY_VERSION}_$(uname -o)_$(uname -m).tar.gz -O - | tar -xz -C /go/bin/ 22 | 23 | # Create user 24 | ARG uid=1000 25 | ARG gid=1000 26 | RUN addgroup -g $gid rf && \ 27 | adduser -D -u $uid -G rf rf && \ 28 | chown rf:rf -R /go 29 | 30 | 31 | USER rf 32 | WORKDIR /go/src/github.com/spotahome/redis-operator 33 | -------------------------------------------------------------------------------- /mocks/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package mocks will have all the mocks of the application. 
3 | */ 4 | package mocks // import "github.com/spotahome/redis-operator/mocks" 5 | 6 | // Logger mocks 7 | //go:generate mockery --output log --dir ../log --name Logger 8 | 9 | // RedisClient mocks 10 | //go:generate mockery --output service/redis --dir ../service/redis --name Client 11 | 12 | // K8SClient mocks 13 | //go:generate mockery --output service/k8s --dir ../service/k8s --name Services 14 | 15 | // RedisFailover mocks 16 | //go:generate mockery --output operator/redisfailover --dir ../service/k8s --name RedisFailover 17 | 18 | // RedisFailover Operator service Checker mocks 19 | //go:generate mockery --output operator/redisfailover/service --dir ../operator/redisfailover/service --name RedisFailoverCheck 20 | 21 | // RedisFailover Operator service Client mocks 22 | //go:generate mockery --output operator/redisfailover/service --dir ../operator/redisfailover/service --name RedisFailoverClient 23 | 24 | // RedisFailover Operator service Healer mocks 25 | //go:generate mockery --output operator/redisfailover/service --dir ../operator/redisfailover/service --name RedisFailoverHeal 26 | -------------------------------------------------------------------------------- /example/operator/roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: redisoperator 5 | rules: 6 | - apiGroups: 7 | - databases.spotahome.com 8 | resources: 9 | - redisfailovers 10 | - redisfailovers/finalizers 11 | verbs: 12 | - "*" 13 | - apiGroups: 14 | - apiextensions.k8s.io 15 | resources: 16 | - customresourcedefinitions 17 | verbs: 18 | - "*" 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - create 25 | - get 26 | - list 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - pods 32 | - services 33 | - endpoints 34 | - events 35 | - configmaps 36 | - secrets 37 | - persistentvolumeclaims 38 | - persistentvolumeclaims/finalizers 39 | verbs: 40 | - "*" 41 | - apiGroups: 42 | - apps 43 | resources: 44 | - deployments 45 | - statefulsets 46 | verbs: 47 | - "*" 48 | - apiGroups: 49 | - policy 50 | resources: 51 | - poddisruptionbudgets 52 | verbs: 53 | - "*" 54 | -------------------------------------------------------------------------------- /example/redisfailover/sidecars.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: sc 6 | --- 7 | apiVersion: v1 8 | kind: Secret 9 | metadata: 10 | name: redis-auth 11 | namespace: sc 12 | type: Opaque 13 | stringData: 14 | password: pass 15 | --- 16 | apiVersion: databases.spotahome.com/v1 17 | kind: RedisFailover 18 | metadata: 19 | name: sidecars 20 | namespace: sc 21 | spec: 22 | auth: 23 | secretPath: redis-auth 24 | sentinel: 25 | initContainers: 26 | - name: echo 27 | image: busybox 28 | command: 29 | - "/bin/sh" 30 | - "-c" 31 | - "echo 'init container sentinel'" 32 | replicas: 3 33 | extraContainers: 34 | - name: busybox 35 | image: busybox 36 | command: 37 | - "/bin/sh" 38 | - "-c" 39 | - "sleep infinity" 40 | redis: 41 | replicas: 3 42 | initContainers: 43 | - name: echo 44 | image: busybox 45 | command: 46 | - "/bin/sh" 47 | - "-c" 48 | - "echo 'init container redis'" 49 | extraContainers: 50 | - name: busybox 51 | image: busybox 52 | command: 53 | - "/bin/sh" 54 | - "-c" 55 | - "sleep infinity" 56 | -------------------------------------------------------------------------------- 
/manifests/kustomize/components/rbac/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: redis-operator 5 | rules: 6 | - apiGroups: 7 | - databases.spotahome.com 8 | resources: 9 | - redisfailovers 10 | - redisfailovers/finalizers 11 | verbs: 12 | - "*" 13 | - apiGroups: 14 | - apiextensions.k8s.io 15 | resources: 16 | - customresourcedefinitions 17 | verbs: 18 | - "*" 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - create 25 | - get 26 | - list 27 | - update 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - pods 32 | - services 33 | - endpoints 34 | - events 35 | - configmaps 36 | - secrets 37 | - persistentvolumeclaims 38 | - persistentvolumeclaims/finalizers 39 | verbs: 40 | - "*" 41 | - apiGroups: 42 | - apps 43 | resources: 44 | - deployments 45 | - statefulsets 46 | verbs: 47 | - "*" 48 | - apiGroups: 49 | - policy 50 | resources: 51 | - poddisruptionbudgets 52 | verbs: 53 | - "*" 54 | -------------------------------------------------------------------------------- /example/redisfailover/topology-spread-contraints.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: tsc 6 | --- 7 | apiVersion: databases.spotahome.com/v1 8 | kind: RedisFailover 9 | metadata: 10 | name: redis-tsc 11 | namespace: tsc 12 | spec: 13 | redis: 14 | topologySpreadConstraints: 15 | - labelSelector: 16 | matchLabels: 17 | app.kubernetes.io/component: redis 18 | maxSkew: 1 19 | topologyKey: topology.kubernetes.io/zone 20 | whenUnsatisfiable: DoNotSchedule 21 | - labelSelector: 22 | matchLabels: 23 | app.kubernetes.io/component: redis 24 | maxSkew: 1 25 | topologyKey: kubernetes.io/hostname 26 | whenUnsatisfiable: ScheduleAnyway 27 | sentinel: 28 | replicas: 3 29 | topologySpreadConstraints: 30 | - labelSelector: 31 | matchLabels: 32 | app.kubernetes.io/component: sentinel 33 | maxSkew: 1 34 | topologyKey: topology.kubernetes.io/zone 35 | whenUnsatisfiable: DoNotSchedule 36 | - labelSelector: 37 | matchLabels: 38 | app.kubernetes.io/component: sentinel 39 | maxSkew: 1 40 | topologyKey: kubernetes.io/hostname 41 | whenUnsatisfiable: ScheduleAnyway 42 | -------------------------------------------------------------------------------- /example/redisfailover/pmem.yaml: -------------------------------------------------------------------------------- 1 | # Deployment that uses persistent volumes provided by pmem-CSI. 
To use this example you need: 2 | # - Proper setup of the persistent memory container storage interface driver from https://github.com/intel/pmem-CSI 3 | apiVersion: databases.spotahome.com/v1 4 | kind: RedisFailover 5 | metadata: 6 | name: redisfailover-pmem 7 | spec: 8 | sentinel: 9 | replicas: 3 10 | command: 11 | - "redis-server" 12 | - "/redis/sentinel.conf" 13 | - "--sentinel" 14 | - "--protected-mode" 15 | - "no" 16 | redis: 17 | securityContext: 18 | runAsNonRoot: False 19 | replicas: 3 20 | image: pmem/redis 21 | version: latest 22 | command: 23 | - "redis-server" 24 | - "/redis/redis.conf" 25 | - "--pmdir" 26 | - "/data" 27 | - "100Mb" 28 | - "--protected-mode" 29 | - "no" 30 | storage: 31 | persistentVolumeClaim: 32 | metadata: 33 | name: redisfailover-pmem-data 34 | spec: 35 | accessModes: 36 | - ReadWriteOnce 37 | resources: 38 | requests: 39 | storage: 100Mi 40 | storageClassName: pmem-csi-sc-ext4 # From https://github.com/intel/pmem-CSI 41 | -------------------------------------------------------------------------------- /operator/redisfailover/service/constants.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | // variables refering to the redis exporter port 4 | const ( 5 | exporterPort = 9121 6 | sentinelExporterPort = 9355 7 | exporterPortName = "http-metrics" 8 | exporterContainerName = "redis-exporter" 9 | sentinelExporterContainerName = "sentinel-exporter" 10 | exporterDefaultRequestCPU = "10m" 11 | exporterDefaultLimitCPU = "1000m" 12 | exporterDefaultRequestMemory = "50Mi" 13 | exporterDefaultLimitMemory = "100Mi" 14 | ) 15 | 16 | const ( 17 | baseName = "rf" 18 | sentinelName = "s" 19 | sentinelRoleName = "sentinel" 20 | sentinelConfigFileName = "sentinel.conf" 21 | redisConfigFileName = "redis.conf" 22 | redisName = "r" 23 | redisMasterName = "rm" 24 | redisSlaveName = "rs" 25 | redisShutdownName = "r-s" 26 | redisReadinessName = "r-readiness" 27 | redisRoleName = "redis" 28 | appLabel = "redis-failover" 29 | hostnameTopologyKey = "kubernetes.io/hostname" 30 | ) 31 | 32 | const ( 33 | redisRoleLabelKey = "redisfailovers-role" 34 | redisRoleLabelMaster = "master" 35 | redisRoleLabelSlave = "slave" 36 | ) 37 | -------------------------------------------------------------------------------- /service/k8s/secret.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/spotahome/redis-operator/log" 7 | "github.com/spotahome/redis-operator/metrics" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/client-go/kubernetes" 11 | ) 12 | 13 | // Secret interacts with k8s to get secrets 14 | type Secret interface { 15 | GetSecret(namespace, name string) (*corev1.Secret, error) 16 | } 17 | 18 | // SecretService is the secret service implementation using API calls to kubernetes. 
19 | type SecretService struct { 20 | kubeClient kubernetes.Interface 21 | logger log.Logger 22 | metricsRecorder metrics.Recorder 23 | } 24 | 25 | func NewSecretService(kubeClient kubernetes.Interface, logger log.Logger, metricsRecorder metrics.Recorder) *SecretService { 26 | 27 | logger = logger.With("service", "k8s.secret") 28 | return &SecretService{ 29 | kubeClient: kubeClient, 30 | logger: logger, 31 | metricsRecorder: metricsRecorder, 32 | } 33 | } 34 | 35 | func (s *SecretService) GetSecret(namespace, name string) (*corev1.Secret, error) { 36 | 37 | secret, err := s.kubeClient.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 38 | recordMetrics(namespace, "Secret", name, "GET", err, s.metricsRecorder) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | return secret, err 44 | } 45 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/fake/register.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | package fake 4 | 5 | import ( 6 | databasesv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 7 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | runtime "k8s.io/apimachinery/pkg/runtime" 9 | schema "k8s.io/apimachinery/pkg/runtime/schema" 10 | serializer "k8s.io/apimachinery/pkg/runtime/serializer" 11 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 12 | ) 13 | 14 | var scheme = runtime.NewScheme() 15 | var codecs = serializer.NewCodecFactory(scheme) 16 | 17 | var localSchemeBuilder = runtime.SchemeBuilder{ 18 | databasesv1.AddToScheme, 19 | } 20 | 21 | // AddToScheme adds all types of this clientset into the given scheme. This allows composition 22 | // of clientsets, like in: 23 | // 24 | // import ( 25 | // "k8s.io/client-go/kubernetes" 26 | // clientsetscheme "k8s.io/client-go/kubernetes/scheme" 27 | // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" 28 | // ) 29 | // 30 | // kclientset, _ := kubernetes.NewForConfig(c) 31 | // _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) 32 | // 33 | // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types 34 | // correctly. 35 | var AddToScheme = localSchemeBuilder.AddToScheme 36 | 37 | func init() { 38 | v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) 39 | utilruntime.Must(AddToScheme(scheme)) 40 | } 41 | -------------------------------------------------------------------------------- /metrics/dummy.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | koopercontroller "github.com/spotahome/kooper/v2/controller" 5 | ) 6 | 7 | // Dummy is a handy instance of a dummy instrumenter; most of the time it will be used in tests. 8 | var Dummy = &dummy{ 9 | MetricsRecorder: koopercontroller.DummyMetricsRecorder, 10 | } 11 | 12 | // dummy is a dummy implementation of Instrumenter.
13 | type dummy struct { 14 | koopercontroller.MetricsRecorder 15 | } 16 | 17 | func (d *dummy) SetClusterOK(namespace string, name string) {} 18 | func (d *dummy) SetClusterError(namespace string, name string) {} 19 | func (d *dummy) DeleteCluster(namespace string, name string) {} 20 | func (d *dummy) SetRedisInstance(IP string, masterIP string, role string) {} 21 | func (d *dummy) ResetRedisInstance() {} 22 | func (d *dummy) RecordEnsureOperation(objectNamespace string, objectName string, objectKind string, resourceName string, status string) { 23 | } 24 | func (d *dummy) RecordRedisCheck(namespace string, resource string, indicator string, instance string, status string) { 25 | } 26 | func (d *dummy) RecordSentinelCheck(namespace string, resource string, indicator string, instance string, status string) { 27 | } 28 | func (d dummy) RecordK8sOperation(namespace string, kind string, object string, operation string, status string, err string) { 29 | } 30 | func (d dummy) RecordRedisOperation(kind string, IP string, operation string, status string, err string) { 31 | } 32 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/scheme/register.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | package scheme 4 | 5 | import ( 6 | databasesv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 7 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | runtime "k8s.io/apimachinery/pkg/runtime" 9 | schema "k8s.io/apimachinery/pkg/runtime/schema" 10 | serializer "k8s.io/apimachinery/pkg/runtime/serializer" 11 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 12 | ) 13 | 14 | var Scheme = runtime.NewScheme() 15 | var Codecs = serializer.NewCodecFactory(Scheme) 16 | var ParameterCodec = runtime.NewParameterCodec(Scheme) 17 | var localSchemeBuilder = runtime.SchemeBuilder{ 18 | databasesv1.AddToScheme, 19 | } 20 | 21 | // AddToScheme adds all types of this clientset into the given scheme. This allows composition 22 | // of clientsets, like in: 23 | // 24 | // import ( 25 | // "k8s.io/client-go/kubernetes" 26 | // clientsetscheme "k8s.io/client-go/kubernetes/scheme" 27 | // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" 28 | // ) 29 | // 30 | // kclientset, _ := kubernetes.NewForConfig(c) 31 | // _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) 32 | // 33 | // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types 34 | // correctly. 35 | var AddToScheme = localSchemeBuilder.AddToScheme 36 | 37 | func init() { 38 | v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) 39 | utilruntime.Must(AddToScheme(Scheme)) 40 | } 41 | -------------------------------------------------------------------------------- /charts/redisoperator/templates/monitoring.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.monitoring.enabled -}} 2 | {{- $fullName := include "chart.fullname" . -}} 3 | {{- $svcPort := .Values.service.port -}} 4 | {{- $data := dict "Chart" .Chart "Release" .Release "Values" .Values -}} 5 | apiVersion: v1 6 | kind: Service 7 | metadata: 8 | name: {{ $fullName }}-prometheus 9 | namespace: {{ include "chart.namespaceName" . 
}} 10 | labels: 11 | prometheus: {{ .Values.monitoring.prometheus.name }} 12 | {{- include "chart.labels" $data | nindent 4 }} 13 | {{- if .Values.monitoring.serviceAnnotations }} 14 | annotations: 15 | {{ toYaml .Values.monitoring.serviceAnnotations | indent 4 }} 16 | {{- end }} 17 | spec: 18 | ports: 19 | - port: {{ $svcPort }} 20 | protocol: TCP 21 | name: metrics 22 | targetPort: metrics 23 | selector: 24 | {{- include "chart.selectorLabels" $data | nindent 4 }} 25 | 26 | --- 27 | {{- if .Values.monitoring.serviceMonitor -}} 28 | apiVersion: monitoring.coreos.com/v1 29 | kind: ServiceMonitor 30 | metadata: 31 | name: {{ $fullName }} 32 | namespace: {{ include "chart.namespaceName" . }} 33 | labels: 34 | prometheus: {{ .Values.monitoring.prometheus.name }} 35 | {{- include "chart.labels" $data | nindent 4 }} 36 | spec: 37 | selector: 38 | matchLabels: 39 | prometheus: {{ .Values.monitoring.prometheus.name }} 40 | {{- include "chart.selectorLabels" $data | nindent 6 }} 41 | namespaceSelector: 42 | matchNames: 43 | - {{ include "chart.namespaceName" . }} 44 | endpoints: 45 | - port: metrics 46 | interval: 15s 47 | {{- end -}} 48 | {{- end -}} -------------------------------------------------------------------------------- /operator/redisfailover/service/names.go: -------------------------------------------------------------------------------- 1 | package service 2 | 3 | import ( 4 | "fmt" 5 | 6 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 7 | ) 8 | 9 | // GetRedisShutdownConfigMapName returns the name for redis configmap 10 | func GetRedisShutdownConfigMapName(rf *redisfailoverv1.RedisFailover) string { 11 | if rf.Spec.Redis.ShutdownConfigMap != "" { 12 | return rf.Spec.Redis.ShutdownConfigMap 13 | } 14 | return GetRedisShutdownName(rf) 15 | } 16 | 17 | // GetRedisName returns the name for redis resources 18 | func GetRedisName(rf *redisfailoverv1.RedisFailover) string { 19 | return generateName(redisName, rf.Name) 20 | } 21 | 22 | // GetRedisShutdownName returns the name for redis resources 23 | func GetRedisShutdownName(rf *redisfailoverv1.RedisFailover) string { 24 | return generateName(redisShutdownName, rf.Name) 25 | } 26 | 27 | // GetRedisReadinessName returns the name for redis resources 28 | func GetRedisReadinessName(rf *redisfailoverv1.RedisFailover) string { 29 | return generateName(redisReadinessName, rf.Name) 30 | } 31 | 32 | // GetSentinelName returns the name for sentinel resources 33 | func GetSentinelName(rf *redisfailoverv1.RedisFailover) string { 34 | return generateName(sentinelName, rf.Name) 35 | } 36 | 37 | func GetRedisMasterName(rf *redisfailoverv1.RedisFailover) string { 38 | return generateName(redisMasterName, rf.Name) 39 | } 40 | 41 | func GetRedisSlaveName(rf *redisfailoverv1.RedisFailover) string { 42 | return generateName(redisSlaveName, rf.Name) 43 | } 44 | 45 | func generateName(typeName, metaName string) string { 46 | return fmt.Sprintf("%s%s-%s", baseName, typeName, metaName) 47 | } 48 | -------------------------------------------------------------------------------- /docs/development.md: -------------------------------------------------------------------------------- 1 | # Development 2 | 3 | ## Folder structure 4 | 5 | ### Code folder structure 6 | 7 | - **api**: definition of the RedisFailover CRD. 8 | - **client**: autogenerated client to interact with redis-failovers. 9 | - **cmd**: contains the starting point of the application. 10 | - **log**: wrapper of logrus, created to be able to mock it. 
11 | - **metrics**: exposes the status of the created failovers. 12 | - **mocks**: contains the mocked interfaces for testing the application. 13 | - **operator**: the main logic. Manages the requests from k8s and creates/updates/deletes the pieces as needed. 14 | - **service**: services/clients to interact with k8s and redises. 15 | - **vendor**: vendored packages used by the application. 16 | 17 | ### Non-code folder structure 18 | 19 | - **charts**: helm chart to deploy the operator. 20 | - **docker**: Dockerfiles to generate redis-failover docker images. 21 | - **example**: YAML files with example RedisFailover specs. 22 | - **hack**: scripts to generate the redis-failover api-client. 23 | - **scripts**: scripts used to build and run the app. 24 | 25 | ## Make development commands 26 | 27 | You can run the following commands with make: 28 | 29 | - Build the development container. 30 | `make docker-build` 31 | - Generate mocks. 32 | `make go-generate` 33 | - Generate the client. 34 | `make update-codegen` 35 | - Run tests. 36 | `make test` 37 | - Build the executable file. 38 | `make build` 39 | - Run the app. 40 | `make run` 41 | - Access the docker instance with a shell. 42 | `make shell` 43 | - Install dependencies. 44 | `make get-deps` 45 | - Update dependencies. 46 | `make update-deps` 47 | - Build the app image. 48 | `make image` 49 | -------------------------------------------------------------------------------- /docs/logic.md: -------------------------------------------------------------------------------- 1 | # Controller logic 2 | 3 | ## Creation pipeline 4 | 5 | The Redis Operator creates Redis Failovers with all the needed pieces. When an event arrives from Kubernetes (add or sync), the following steps are executed: 6 | 7 | - Ensure: checks that all the needed pieces are created. It is important to note that if a change is performed manually on the created objects, the operator will override it. This is done to ensure a healthy status. It will create the following: 8 | - Redis service (if the exporter is enabled) 9 | - Redis configmap 10 | - Redis shutdown configmap 11 | - Redis statefulset 12 | - Sentinel service 13 | - Sentinel configmap 14 | - Sentinel deployment 15 | - Check & Heal: connects to every Redis and Sentinel and ensures that they are working as they are supposed to. If they are not, it reconfigures the nodes to move them to the desired state. It checks the following (see the sketch after this list): 16 | - The number of Redis instances matches the one set on the RF spec 17 | - The number of Sentinel instances matches the one set on the RF spec 18 | - Only one Redis is working as master 19 | - All Redis slaves have the same master 20 | - All Redis slaves are connected to the master 21 | - All Sentinels point to the same Redis master 22 | - Sentinels have no dead nodes 23 | - Sentinels know the correct number of slaves 24 | - Redis has the custom configuration set 25 | - Sentinel has the custom configuration set 26 | 27 | Most of the problems that may occur will be handled and fixed by the controller, except for a [split-brain](). **If a split-brain happens, an error will be logged and the operator will wait for a manual fix**.
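The ordering above can be made concrete with a small sketch. This is illustrative only and not the operator's real checker/healer API: the `spec`, `observed`, and `checkAndHeal` names below are hypothetical stand-ins that just show the documented order of checks and the split-brain behaviour (report and wait for a manual fix).

```go
package main

import "fmt"

// Hypothetical, simplified view of the pieces involved; the real operator
// gathers this state through its k8s and redis services instead of slices.
type spec struct{ redisReplicas, sentinelReplicas int }
type observed struct{ masters, redises, sentinels []string }

// checkAndHeal applies the checks in the documented order: instance counts
// first, then exactly one master; several masters means split-brain, which
// is only reported so that a human can fix it.
func checkAndHeal(s spec, o observed) error {
	if len(o.redises) != s.redisReplicas {
		return fmt.Errorf("expected %d redis nodes, got %d", s.redisReplicas, len(o.redises))
	}
	if len(o.sentinels) != s.sentinelReplicas {
		return fmt.Errorf("expected %d sentinels, got %d", s.sentinelReplicas, len(o.sentinels))
	}
	if n := len(o.masters); n > 1 {
		return fmt.Errorf("split-brain: %d masters found, waiting for manual fix", n)
	}
	// The remaining checks (slaves follow the single master, sentinels agree
	// on it, custom configuration is applied) would run here.
	return nil
}

func main() {
	err := checkAndHeal(
		spec{redisReplicas: 3, sentinelReplicas: 3},
		observed{masters: []string{"10.0.0.1"}, redises: []string{"a", "b", "c"}, sentinels: []string{"x", "y", "z"}},
	)
	fmt.Println(err) // prints <nil> for a healthy cluster
}
```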
28 | -------------------------------------------------------------------------------- /service/k8s/k8s.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | apiextensionscli "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" 5 | "k8s.io/client-go/kubernetes" 6 | 7 | redisfailoverclientset "github.com/spotahome/redis-operator/client/k8s/clientset/versioned" 8 | "github.com/spotahome/redis-operator/log" 9 | "github.com/spotahome/redis-operator/metrics" 10 | ) 11 | 12 | // Services is the K8s service entrypoint. 13 | type Services interface { 14 | ConfigMap 15 | Secret 16 | Pod 17 | PodDisruptionBudget 18 | RedisFailover 19 | Service 20 | RBAC 21 | Deployment 22 | StatefulSet 23 | } 24 | 25 | type services struct { 26 | ConfigMap 27 | Secret 28 | Pod 29 | PodDisruptionBudget 30 | RedisFailover 31 | Service 32 | RBAC 33 | Deployment 34 | StatefulSet 35 | } 36 | 37 | // New returns a new Kubernetes service. 38 | func New(kubecli kubernetes.Interface, crdcli redisfailoverclientset.Interface, apiextcli apiextensionscli.Interface, logger log.Logger, metricsRecorder metrics.Recorder) Services { 39 | return &services{ 40 | ConfigMap: NewConfigMapService(kubecli, logger, metricsRecorder), 41 | Secret: NewSecretService(kubecli, logger, metricsRecorder), 42 | Pod: NewPodService(kubecli, logger, metricsRecorder), 43 | PodDisruptionBudget: NewPodDisruptionBudgetService(kubecli, logger, metricsRecorder), 44 | RedisFailover: NewRedisFailoverService(crdcli, logger, metricsRecorder), 45 | Service: NewServiceService(kubecli, logger, metricsRecorder), 46 | RBAC: NewRBACService(kubecli, logger, metricsRecorder), 47 | Deployment: NewDeploymentService(kubecli, logger, metricsRecorder), 48 | StatefulSet: NewStatefulSetService(kubecli, logger, metricsRecorder), 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /service/k8s/util.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "fmt" 5 | 6 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 7 | "github.com/spotahome/redis-operator/metrics" 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | ) 10 | 11 | // GetRedisPassword retrieves the password from a kubernetes secret or, if 12 | // unspecified, returns a blank string 13 | func GetRedisPassword(s Services, rf *redisfailoverv1.RedisFailover) (string, error) { 14 | 15 | if rf.Spec.Auth.SecretPath == "" { 16 | // no auth settings specified, return blank password 17 | return "", nil 18 | } 19 | 20 | secret, err := s.GetSecret(rf.ObjectMeta.Namespace, rf.Spec.Auth.SecretPath) 21 | if err != nil { 22 | return "", err 23 | } 24 | 25 | if password, ok := secret.Data["password"]; ok { 26 | return string(password), nil 27 | } 28 | 29 | return "", fmt.Errorf("secret \"%s\" does not have a password field", rf.Spec.Auth.SecretPath) 30 | } 31 | 32 | func recordMetrics(namespace string, kind string, object string, operation string, err error, metricsRecorder metrics.Recorder) { 33 | if nil == err { 34 | metricsRecorder.RecordK8sOperation(namespace, kind, object, operation, metrics.SUCCESS, metrics.NOT_APPLICABLE) 35 | } else if errors.IsForbidden(err) { 36 | metricsRecorder.RecordK8sOperation(namespace, kind, object, operation, metrics.FAIL, metrics.K8S_FORBIDDEN_ERR) 37 | } else if errors.IsUnauthorized(err) { 38 | metricsRecorder.RecordK8sOperation(namespace, kind, object, operation, metrics.FAIL, metrics.K8S_UNAUTH)
39 | } else if errors.IsNotFound(err) { 40 | metricsRecorder.RecordK8sOperation(namespace, kind, object, operation, metrics.FAIL, metrics.K8S_NOT_FOUND) 41 | } else { 42 | metricsRecorder.RecordK8sOperation(namespace, kind, object, operation, metrics.FAIL, metrics.K8S_MISC) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /api/redisfailover/v1/register.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "github.com/spotahome/redis-operator/api/redisfailover" 5 | 6 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/apimachinery/pkg/runtime" 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | ) 11 | 12 | const ( 13 | version = "v1" 14 | ) 15 | 16 | // Team constants 17 | const ( 18 | RFKind = "RedisFailover" 19 | RFName = "redisfailover" 20 | RFNamePlural = "redisfailovers" 21 | RFScope = apiextensionsv1.NamespaceScoped 22 | ) 23 | 24 | // SchemeGroupVersion is group version used to register these objects 25 | var SchemeGroupVersion = schema.GroupVersion{Group: redisfailover.GroupName, Version: version} 26 | 27 | // Kind takes an unqualified kind and returns back a Group qualified GroupKind 28 | func Kind(kind string) schema.GroupKind { 29 | return VersionKind(kind).GroupKind() 30 | } 31 | 32 | // VersionKind takes an unqualified kind and returns back a Group qualified GroupVersionKind 33 | func VersionKind(kind string) schema.GroupVersionKind { 34 | return SchemeGroupVersion.WithKind(kind) 35 | } 36 | 37 | // Resource takes an unqualified resource and returns a Group qualified GroupResource 38 | func Resource(resource string) schema.GroupResource { 39 | return SchemeGroupVersion.WithResource(resource).GroupResource() 40 | } 41 | 42 | var ( 43 | SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) 44 | AddToScheme = SchemeBuilder.AddToScheme 45 | ) 46 | 47 | // Adds the list of known types to Scheme. 
48 | func addKnownTypes(scheme *runtime.Scheme) error { 49 | scheme.AddKnownTypes(SchemeGroupVersion, 50 | &RedisFailover{}, 51 | &RedisFailoverList{}, 52 | ) 53 | metav1.AddToGroupVersion(scheme, SchemeGroupVersion) 54 | return nil 55 | } 56 | -------------------------------------------------------------------------------- /operator/redisfailover/ensurer.go: -------------------------------------------------------------------------------- 1 | package redisfailover 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | 6 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 7 | "github.com/spotahome/redis-operator/metrics" 8 | ) 9 | 10 | // Ensure is called to ensure all of the resources associated with a RedisFailover are created 11 | func (w *RedisFailoverHandler) Ensure(rf *redisfailoverv1.RedisFailover, labels map[string]string, or []metav1.OwnerReference, metricsClient metrics.Recorder) error { 12 | if rf.Spec.Redis.Exporter.Enabled { 13 | if err := w.rfService.EnsureRedisService(rf, labels, or); err != nil { 14 | return err 15 | } 16 | } else { 17 | if err := w.rfService.EnsureNotPresentRedisService(rf); err != nil { 18 | return err 19 | } 20 | } 21 | 22 | sentinelsAllowed := rf.SentinelsAllowed() 23 | if sentinelsAllowed { 24 | if err := w.rfService.EnsureSentinelService(rf, labels, or); err != nil { 25 | return err 26 | } 27 | if err := w.rfService.EnsureSentinelConfigMap(rf, labels, or); err != nil { 28 | return err 29 | } 30 | } 31 | 32 | if err := w.rfService.EnsureRedisMasterService(rf, labels, or); err != nil { 33 | return err 34 | } 35 | 36 | if err := w.rfService.EnsureRedisSlaveService(rf, labels, or); err != nil { 37 | return err 38 | } 39 | 40 | if err := w.rfService.EnsureRedisShutdownConfigMap(rf, labels, or); err != nil { 41 | return err 42 | } 43 | if err := w.rfService.EnsureRedisReadinessConfigMap(rf, labels, or); err != nil { 44 | return err 45 | } 46 | if err := w.rfService.EnsureRedisConfigMap(rf, labels, or); err != nil { 47 | return err 48 | } 49 | if err := w.rfService.EnsureRedisStatefulset(rf, labels, or); err != nil { 50 | return err 51 | } 52 | 53 | if sentinelsAllowed { 54 | if err := w.rfService.EnsureSentinelDeployment(rf, labels, or); err != nil { 55 | return err 56 | } 57 | } 58 | 59 | return nil 60 | } 61 | -------------------------------------------------------------------------------- /log/dummy.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | // Dummy is a dummy logger 4 | var Dummy = DummyLogger{} 5 | 6 | // DummyLogger is an empty logger mainly used for tests 7 | type DummyLogger struct{} 8 | 9 | func (l DummyLogger) Debug(...interface{}) {} 10 | func (l DummyLogger) Debugln(...interface{}) {} 11 | func (l DummyLogger) Debugf(string, ...interface{}) {} 12 | func (l DummyLogger) Info(...interface{}) {} 13 | func (l DummyLogger) Infoln(...interface{}) {} 14 | func (l DummyLogger) Infof(string, ...interface{}) {} 15 | func (l DummyLogger) Warn(...interface{}) {} 16 | func (l DummyLogger) Warnln(...interface{}) {} 17 | func (l DummyLogger) Warnf(string, ...interface{}) {} 18 | func (l DummyLogger) Warningf(format string, args ...interface{}) {} 19 | func (l DummyLogger) Error(...interface{}) {} 20 | func (l DummyLogger) Errorln(...interface{}) {} 21 | func (l DummyLogger) Errorf(string, ...interface{}) {} 22 | func (l DummyLogger) Fatal(...interface{}) {} 23 | func (l DummyLogger) Fatalln(...interface{}) {} 24 | func (l DummyLogger) Fatalf(string, 
...interface{}) {} 25 | func (l DummyLogger) Panic(...interface{}) {} 26 | func (l DummyLogger) Panicln(...interface{}) {} 27 | func (l DummyLogger) Panicf(string, ...interface{}) {} 28 | func (l DummyLogger) With(key string, value interface{}) Logger { return l } 29 | func (l DummyLogger) WithField(key string, value interface{}) Logger { return l } 30 | func (l DummyLogger) WithFields(values map[string]interface{}) Logger { return l } 31 | func (l DummyLogger) Set(level Level) error { return nil } 32 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | pull_request: 8 | 9 | jobs: 10 | check: 11 | name: Golang Check 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v3 15 | - uses: actions/setup-go@v4 16 | with: 17 | go-version-file: go.mod 18 | cache: false 19 | - name: golangci-lint 20 | uses: golangci/golangci-lint-action@v3 21 | with: 22 | version: v1.53 23 | args: --timeout=15m 24 | 25 | unit-test: 26 | name: Unit test 27 | runs-on: ubuntu-latest 28 | steps: 29 | - uses: actions/checkout@v3 30 | - uses: actions/setup-go@v4 31 | with: 32 | go-version-file: go.mod 33 | - run: make ci-unit-test 34 | 35 | integration-test: 36 | name: Integration test 37 | runs-on: ubuntu-latest 38 | needs: [check, unit-test] 39 | strategy: 40 | matrix: 41 | kubernetes: [1.24.16, 1.25.12, 1.26.7, 1.27.3 ] 42 | steps: 43 | - uses: actions/checkout@v3 44 | - uses: actions/setup-go@v4 45 | with: 46 | go-version-file: go.mod 47 | - name: Install conntrack 48 | run: sudo apt-get install -y conntrack 49 | - uses: medyagh/setup-minikube@v0.0.14 50 | with: 51 | kubernetes-version: ${{ matrix.kubernetes }} 52 | minikube-version: 1.31.1 53 | driver: none 54 | - name: Add redisfailover CRD 55 | run: kubectl create -f manifests/databases.spotahome.com_redisfailovers.yaml 56 | - run: make ci-integration-test 57 | 58 | chart-test: 59 | name: Chart testing 60 | runs-on: ubuntu-latest 61 | steps: 62 | - uses: actions/checkout@v3 63 | with: 64 | fetch-depth: 0 65 | 66 | - name: Install Helm 67 | uses: azure/setup-helm@v3 68 | with: 69 | version: v3.7.2 70 | 71 | - name: Helm test 72 | run: make helm-test 73 | -------------------------------------------------------------------------------- /cmd/utils/k8s.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | 6 | apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" 7 | "k8s.io/client-go/kubernetes" 8 | "k8s.io/client-go/rest" 9 | "k8s.io/client-go/tools/clientcmd" 10 | 11 | redisfailoverclientset "github.com/spotahome/redis-operator/client/k8s/clientset/versioned" 12 | ) 13 | 14 | // LoadKubernetesConfig loads kubernetes configuration based on flags. 15 | func LoadKubernetesConfig(flags *CMDFlags) (*rest.Config, error) { 16 | var cfg *rest.Config 17 | // If devel mode then use configuration flag path. 
18 | if flags.Development { 19 | config, err := clientcmd.BuildConfigFromFlags("", flags.KubeConfig) 20 | if err != nil { 21 | return nil, fmt.Errorf("could not load configuration: %s", err) 22 | } 23 | cfg = config 24 | } else { 25 | config, err := rest.InClusterConfig() 26 | if err != nil { 27 | return nil, fmt.Errorf("error loading kubernetes configuration inside cluster, check app is running outside kubernetes cluster or run in development mode: %s", err) 28 | } 29 | cfg = config 30 | } 31 | 32 | cfg.QPS = float32(flags.K8sQueriesPerSecond) 33 | cfg.Burst = flags.K8sQueriesBurstable 34 | 35 | return cfg, nil 36 | } 37 | 38 | // CreateKubernetesClients create the clients to connect to kubernetes 39 | func CreateKubernetesClients(flags *CMDFlags) (kubernetes.Interface, redisfailoverclientset.Interface, apiextensionsclientset.Interface, error) { 40 | config, err := LoadKubernetesConfig(flags) 41 | if err != nil { 42 | return nil, nil, nil, err 43 | } 44 | 45 | clientset, err := kubernetes.NewForConfig(config) 46 | if err != nil { 47 | return nil, nil, nil, err 48 | } 49 | customClientset, err := redisfailoverclientset.NewForConfig(config) 50 | if err != nil { 51 | return nil, nil, nil, err 52 | } 53 | 54 | aeClientset, err := apiextensionsclientset.NewForConfig(config) 55 | if err != nil { 56 | return nil, nil, nil, err 57 | } 58 | 59 | return clientset, customClientset, aeClientset, nil 60 | } 61 | -------------------------------------------------------------------------------- /service/k8s/secret_test.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/spotahome/redis-operator/log" 8 | "github.com/spotahome/redis-operator/metrics" 9 | "github.com/stretchr/testify/assert" 10 | corev1 "k8s.io/api/core/v1" 11 | 12 | errors "k8s.io/apimachinery/pkg/api/errors" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/runtime" 15 | kubernetes "k8s.io/client-go/kubernetes/fake" 16 | kubetesting "k8s.io/client-go/testing" 17 | ) 18 | 19 | func TestSecretServiceGet(t *testing.T) { 20 | 21 | t.Run("Test getting a secret", func(t *testing.T) { 22 | assert := assert.New(t) 23 | 24 | secret := corev1.Secret{ 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: "test_secret", 27 | Namespace: "test_namespace", 28 | }, 29 | Data: map[string][]byte{ 30 | "foo": []byte("bar"), 31 | }, 32 | } 33 | 34 | mcli := &kubernetes.Clientset{} 35 | mcli.AddReactor("create", "secrets", func(action kubetesting.Action) (bool, runtime.Object, error) { 36 | return true, &secret, nil 37 | }) 38 | mcli.AddReactor("get", "secrets", func(action kubetesting.Action) (bool, runtime.Object, error) { 39 | a := (action).(kubetesting.GetActionImpl) 40 | if a.Namespace == secret.ObjectMeta.Namespace && a.Name == secret.ObjectMeta.Name { 41 | return true, &secret, nil 42 | } 43 | return true, nil, errors.NewNotFound(action.GetResource().GroupResource(), a.Name) 44 | }) 45 | 46 | _, err := mcli.CoreV1().Secrets(secret.ObjectMeta.Namespace).Create(context.TODO(), &secret, metav1.CreateOptions{}) 47 | assert.NoError(err) 48 | 49 | // test getting the secret 50 | service := NewSecretService(mcli, log.Dummy, metrics.Dummy) 51 | ss, err := service.GetSecret(secret.ObjectMeta.Namespace, secret.ObjectMeta.Name) 52 | assert.NotNil(ss) 53 | assert.NoError(err) 54 | 55 | // test getting a nonexistent secret 56 | _, err = service.GetSecret(secret.ObjectMeta.Namespace, secret.ObjectMeta.Name+"nonexistent") 57 | 
assert.Error(err) 58 | assert.True(errors.IsNotFound(err)) 59 | }) 60 | } 61 | -------------------------------------------------------------------------------- /api/redisfailover/v1/validate.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "strconv" 7 | ) 8 | 9 | const ( 10 | maxNameLength = 48 11 | ) 12 | 13 | // Validate set the values by default if not defined and checks if the values given are valid 14 | func (r *RedisFailover) Validate() error { 15 | if len(r.Name) > maxNameLength { 16 | return fmt.Errorf("name length can't be higher than %d", maxNameLength) 17 | } 18 | 19 | if r.Bootstrapping() { 20 | if r.Spec.BootstrapNode.Host == "" { 21 | return errors.New("BootstrapNode must include a host when provided") 22 | } 23 | 24 | if r.Spec.BootstrapNode.Port == "" { 25 | r.Spec.BootstrapNode.Port = strconv.Itoa(defaultRedisPort) 26 | } 27 | r.Spec.Redis.CustomConfig = deduplicateStr(append(bootstrappingRedisCustomConfig, r.Spec.Redis.CustomConfig...)) 28 | } else { 29 | r.Spec.Redis.CustomConfig = deduplicateStr(append(defaultRedisCustomConfig, r.Spec.Redis.CustomConfig...)) 30 | } 31 | 32 | if r.Spec.Redis.Image == "" { 33 | r.Spec.Redis.Image = defaultImage 34 | } 35 | 36 | if r.Spec.Sentinel.Image == "" { 37 | r.Spec.Sentinel.Image = defaultImage 38 | } 39 | 40 | if r.Spec.Redis.Replicas <= 0 { 41 | r.Spec.Redis.Replicas = defaultRedisNumber 42 | } 43 | 44 | if r.Spec.Redis.Port <= 0 { 45 | r.Spec.Redis.Port = defaultRedisPort 46 | } 47 | 48 | if r.Spec.Sentinel.Replicas <= 0 { 49 | r.Spec.Sentinel.Replicas = defaultSentinelNumber 50 | } 51 | 52 | if r.Spec.Redis.Exporter.Image == "" { 53 | r.Spec.Redis.Exporter.Image = defaultExporterImage 54 | } 55 | 56 | if r.Spec.Sentinel.Exporter.Image == "" { 57 | r.Spec.Sentinel.Exporter.Image = defaultSentinelExporterImage 58 | } 59 | 60 | if len(r.Spec.Sentinel.CustomConfig) == 0 { 61 | r.Spec.Sentinel.CustomConfig = defaultSentinelCustomConfig 62 | } 63 | 64 | return nil 65 | } 66 | 67 | func deduplicateStr(strSlice []string) []string { 68 | allKeys := make(map[string]bool) 69 | list := []string{} 70 | for _, item := range strSlice { 71 | if _, value := allKeys[item]; !value { 72 | allKeys[item] = true 73 | list = append(list, item) 74 | } 75 | } 76 | return list 77 | } 78 | -------------------------------------------------------------------------------- /api/redisfailover/v1/bootstrapping_test.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | func generateRedisFailover(name string, bootstrapNode *BootstrapSettings) *RedisFailover { 11 | return &RedisFailover{ 12 | ObjectMeta: metav1.ObjectMeta{ 13 | Name: name, 14 | Namespace: "namespace", 15 | }, 16 | Spec: RedisFailoverSpec{ 17 | BootstrapNode: bootstrapNode, 18 | }, 19 | } 20 | } 21 | 22 | func TestBootstrapping(t *testing.T) { 23 | tests := []struct { 24 | name string 25 | expectation bool 26 | bootstrapSettings *BootstrapSettings 27 | }{ 28 | { 29 | name: "without BootstrapSettings", 30 | expectation: false, 31 | }, 32 | { 33 | name: "with BootstrapSettings", 34 | expectation: true, 35 | bootstrapSettings: &BootstrapSettings{ 36 | Host: "127.0.0.1", 37 | Port: "6379", 38 | }, 39 | }, 40 | } 41 | 42 | for _, test := range tests { 43 | t.Run(test.name, func(t *testing.T) { 44 | rf := 
generateRedisFailover("test", test.bootstrapSettings) 45 | assert.Equal(t, test.expectation, rf.Bootstrapping()) 46 | }) 47 | } 48 | } 49 | 50 | func TestSentinelsAllowed(t *testing.T) { 51 | tests := []struct { 52 | name string 53 | expectation bool 54 | bootstrapSettings *BootstrapSettings 55 | }{ 56 | { 57 | name: "without BootstrapSettings", 58 | expectation: true, 59 | }, 60 | { 61 | name: "with BootstrapSettings", 62 | expectation: false, 63 | bootstrapSettings: &BootstrapSettings{ 64 | Host: "127.0.0.1", 65 | Port: "6379", 66 | }, 67 | }, 68 | { 69 | name: "with BootstrapSettings that allows sentinels", 70 | expectation: true, 71 | bootstrapSettings: &BootstrapSettings{ 72 | Host: "127.0.0.1", 73 | Port: "6379", 74 | AllowSentinels: true, 75 | }, 76 | }, 77 | } 78 | 79 | for _, test := range tests { 80 | t.Run(test.name, func(t *testing.T) { 81 | rf := generateRedisFailover("test", test.bootstrapSettings) 82 | assert.Equal(t, test.expectation, rf.SentinelsAllowed()) 83 | }) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /service/k8s/redisfailover.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "k8s.io/apimachinery/pkg/watch" 8 | 9 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 10 | redisfailoverclientset "github.com/spotahome/redis-operator/client/k8s/clientset/versioned" 11 | "github.com/spotahome/redis-operator/log" 12 | "github.com/spotahome/redis-operator/metrics" 13 | ) 14 | 15 | // RedisFailover the RF service that knows how to interact with k8s to get them 16 | type RedisFailover interface { 17 | // ListRedisFailovers lists the redisfailovers on a cluster. 18 | ListRedisFailovers(ctx context.Context, namespace string, opts metav1.ListOptions) (*redisfailoverv1.RedisFailoverList, error) 19 | // WatchRedisFailovers watches the redisfailovers on a cluster. 20 | WatchRedisFailovers(ctx context.Context, namespace string, opts metav1.ListOptions) (watch.Interface, error) 21 | } 22 | 23 | // RedisFailoverService is the RedisFailover service implementation using API calls to kubernetes. 24 | type RedisFailoverService struct { 25 | k8sCli redisfailoverclientset.Interface 26 | logger log.Logger 27 | metricsRecorder metrics.Recorder 28 | } 29 | 30 | // NewRedisFailoverService returns a new Workspace KubeService. 31 | func NewRedisFailoverService(k8scli redisfailoverclientset.Interface, logger log.Logger, metricsRecorder metrics.Recorder) *RedisFailoverService { 32 | logger = logger.With("service", "k8s.redisfailover") 33 | return &RedisFailoverService{ 34 | k8sCli: k8scli, 35 | logger: logger, 36 | metricsRecorder: metricsRecorder, 37 | } 38 | } 39 | 40 | // ListRedisFailovers satisfies redisfailover.Service interface. 41 | func (r *RedisFailoverService) ListRedisFailovers(ctx context.Context, namespace string, opts metav1.ListOptions) (*redisfailoverv1.RedisFailoverList, error) { 42 | redisFailoverList, err := r.k8sCli.DatabasesV1().RedisFailovers(namespace).List(ctx, opts) 43 | recordMetrics(namespace, "RedisFailover", metrics.NOT_APPLICABLE, "LIST", err, r.metricsRecorder) 44 | return redisFailoverList, err 45 | } 46 | 47 | // WatchRedisFailovers satisfies redisfailover.Service interface. 
48 | func (r *RedisFailoverService) WatchRedisFailovers(ctx context.Context, namespace string, opts metav1.ListOptions) (watch.Interface, error) { 49 | watcher, err := r.k8sCli.DatabasesV1().RedisFailovers(namespace).Watch(ctx, opts) 50 | recordMetrics(namespace, "RedisFailover", metrics.NOT_APPLICABLE, "WATCH", err, r.metricsRecorder) 51 | return watcher, err 52 | } 53 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/fake/clientset_generated.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | package fake 4 | 5 | import ( 6 | clientset "github.com/spotahome/redis-operator/client/k8s/clientset/versioned" 7 | databasesv1 "github.com/spotahome/redis-operator/client/k8s/clientset/versioned/typed/redisfailover/v1" 8 | fakedatabasesv1 "github.com/spotahome/redis-operator/client/k8s/clientset/versioned/typed/redisfailover/v1/fake" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "k8s.io/apimachinery/pkg/watch" 11 | "k8s.io/client-go/discovery" 12 | fakediscovery "k8s.io/client-go/discovery/fake" 13 | "k8s.io/client-go/testing" 14 | ) 15 | 16 | // NewSimpleClientset returns a clientset that will respond with the provided objects. 17 | // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, 18 | // without applying any validations and/or defaults. It shouldn't be considered a replacement 19 | // for a real clientset and is mostly useful in simple unit tests. 20 | func NewSimpleClientset(objects ...runtime.Object) *Clientset { 21 | o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) 22 | for _, obj := range objects { 23 | if err := o.Add(obj); err != nil { 24 | panic(err) 25 | } 26 | } 27 | 28 | cs := &Clientset{tracker: o} 29 | cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} 30 | cs.AddReactor("*", "*", testing.ObjectReaction(o)) 31 | cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { 32 | gvr := action.GetResource() 33 | ns := action.GetNamespace() 34 | watch, err := o.Watch(gvr, ns) 35 | if err != nil { 36 | return false, nil, err 37 | } 38 | return true, watch, nil 39 | }) 40 | 41 | return cs 42 | } 43 | 44 | // Clientset implements clientset.Interface. Meant to be embedded into a 45 | // struct to get a default implementation. This makes faking out just the method 46 | // you want to test easier. 
47 | type Clientset struct { 48 | testing.Fake 49 | discovery *fakediscovery.FakeDiscovery 50 | tracker testing.ObjectTracker 51 | } 52 | 53 | func (c *Clientset) Discovery() discovery.DiscoveryInterface { 54 | return c.discovery 55 | } 56 | 57 | func (c *Clientset) Tracker() testing.ObjectTracker { 58 | return c.tracker 59 | } 60 | 61 | var ( 62 | _ clientset.Interface = &Clientset{} 63 | _ testing.FakeClient = &Clientset{} 64 | ) 65 | 66 | // DatabasesV1 retrieves the DatabasesV1Client 67 | func (c *Clientset) DatabasesV1() databasesv1.DatabasesV1Interface { 68 | return &fakedatabasesv1.FakeDatabasesV1{Fake: &c.Fake} 69 | } 70 | -------------------------------------------------------------------------------- /cmd/utils/flags.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "path/filepath" 7 | "regexp" 8 | 9 | "github.com/spotahome/redis-operator/operator/redisfailover" 10 | "k8s.io/client-go/util/homedir" 11 | ) 12 | 13 | // CMDFlags are the flags used by the cmd 14 | // TODO: improve flags. 15 | type CMDFlags struct { 16 | KubeConfig string 17 | SupportedNamespacesRegex string 18 | Development bool 19 | ListenAddr string 20 | MetricsPath string 21 | K8sQueriesPerSecond int 22 | K8sQueriesBurstable int 23 | Concurrency int 24 | LogLevel string 25 | } 26 | 27 | // Init initializes and parses the flags 28 | func (c *CMDFlags) Init() { 29 | kubehome := filepath.Join(homedir.HomeDir(), ".kube", "config") 30 | // register flags 31 | flag.StringVar(&c.KubeConfig, "kubeconfig", kubehome, "kubernetes configuration path, only used when development mode enabled") 32 | flag.StringVar(&c.SupportedNamespacesRegex, "supported-namespaces-regex", ".*", "To limit the namespaces this operator looks into") 33 | flag.BoolVar(&c.Development, "development", false, "development flag will allow to run the operator outside a kubernetes cluster") 34 | flag.StringVar(&c.ListenAddr, "listen-address", ":9710", "Address to listen on for metrics.") 35 | flag.StringVar(&c.MetricsPath, "metrics-path", "/metrics", "Path to serve the metrics.") 36 | flag.IntVar(&c.K8sQueriesPerSecond, "k8s-cli-qps-limit", 100, "Number of allowed queries per second by kubernetes client without client side throttling") 37 | flag.IntVar(&c.K8sQueriesBurstable, "k8s-cli-burstable-limit", 100, "Number of allowed burst requests by kubernetes client without client side throttling") 38 | // default is 3 for concurrency because kooper also defines 3 as default 39 | // reference: https://github.com/spotahome/kooper/blob/master/controller/controller.go#L89 40 | flag.IntVar(&c.Concurrency, "concurrency", 3, "Number of concurrent workers meant to process events") 41 | flag.StringVar(&c.LogLevel, "log-level", "info", "set log level") 42 | // Parse flags 43 | flag.Parse() 44 | 45 | if _, err := regexp.Compile(c.SupportedNamespacesRegex); err != nil { 46 | panic(fmt.Errorf("supported namespaces Regex is not valid: %w", err)) 47 | } 48 | } 49 | 50 | // ToRedisOperatorConfig converts the flags to redisfailover config 51 | func (c *CMDFlags) ToRedisOperatorConfig() redisfailover.Config { 52 | return redisfailover.Config{ 53 | ListenAddress: c.ListenAddr, 54 | MetricsPath: c.MetricsPath, 55 | Concurrency: c.Concurrency, 56 | SupportedNamespacesRegex: c.SupportedNamespacesRegex, 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /charts/redisoperator/templates/service-account.yaml:
-------------------------------------------------------------------------------- 1 | {{ if .Values.serviceAccount.create }} 2 | {{- $fullName := include "chart.fullname" . -}} 3 | {{- $data := dict "Chart" .Chart "Release" .Release "Values" .Values -}} 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | name: {{ $fullName }} 8 | namespace: {{ include "chart.namespaceName" . }} 9 | labels: 10 | {{- include "chart.labels" $data | nindent 4 }} 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | name: {{ $fullName }} 16 | labels: 17 | {{- include "chart.labels" $data | nindent 4 }} 18 | rules: 19 | - apiGroups: 20 | - databases.spotahome.com 21 | resources: 22 | - redisfailovers 23 | - redisfailovers/finalizers 24 | verbs: 25 | - create 26 | - delete 27 | - get 28 | - list 29 | - patch 30 | - update 31 | - watch 32 | - apiGroups: 33 | - apiextensions.k8s.io 34 | resources: 35 | - customresourcedefinitions 36 | verbs: 37 | - create 38 | - delete 39 | - get 40 | - list 41 | - patch 42 | - update 43 | - watch 44 | - apiGroups: 45 | - coordination.k8s.io 46 | resources: 47 | - leases 48 | verbs: 49 | - create 50 | - get 51 | - list 52 | - update 53 | - apiGroups: 54 | - "" 55 | resources: 56 | - pods 57 | - services 58 | - endpoints 59 | - events 60 | - configmaps 61 | - persistentvolumeclaims 62 | - persistentvolumeclaims/finalizers 63 | verbs: 64 | - create 65 | - delete 66 | - get 67 | - list 68 | - patch 69 | - update 70 | - watch 71 | - apiGroups: 72 | - "" 73 | resources: 74 | - secrets 75 | verbs: 76 | - "get" 77 | - apiGroups: 78 | - apps 79 | resources: 80 | - deployments 81 | - statefulsets 82 | verbs: 83 | - create 84 | - delete 85 | - get 86 | - list 87 | - patch 88 | - update 89 | - watch 90 | - apiGroups: 91 | - policy 92 | resources: 93 | - poddisruptionbudgets 94 | verbs: 95 | - create 96 | - delete 97 | - get 98 | - list 99 | - patch 100 | - update 101 | - watch 102 | --- 103 | kind: ClusterRoleBinding 104 | apiVersion: rbac.authorization.k8s.io/v1 105 | metadata: 106 | name: {{ $fullName }} 107 | subjects: 108 | - kind: ServiceAccount 109 | name: {{ $fullName }} 110 | namespace: {{ include "chart.namespaceName" . }} 111 | roleRef: 112 | apiGroup: rbac.authorization.k8s.io 113 | kind: ClusterRole 114 | name: {{ $fullName }} 115 | {{- end }} 116 | -------------------------------------------------------------------------------- /charts/redisoperator/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for redis-operator. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | # Name of the image repository to pull the container image from. 6 | image: 7 | repository: quay.io/spotahome/redis-operator 8 | pullPolicy: IfNotPresent 9 | tag: v1.3.0 10 | cli_args: "" 11 | 12 | imageCredentials: 13 | create: false 14 | registry: url.private.registry 15 | username: someone 16 | password: somepassword 17 | email: someone@example.com 18 | # Use exists secrets in namespace 19 | existsSecrets: 20 | - registrysecret 21 | 22 | updateStrategy: 23 | type: RollingUpdate 24 | 25 | replicas: 1 26 | 27 | # A name in place of the chart name for `app:` labels. 28 | nameOverride: "" 29 | 30 | # A name to substitute for the full names of resources. 
31 | fullnameOverride: "" 32 | 33 | # The name of the Namespace to deploy 34 | # If not set, `.Release.Namespace` is used 35 | namespace: null 36 | 37 | serviceAccount: 38 | # Enable service account creation. 39 | create: true 40 | # Annotations to be added to the service account. 41 | annotations: {} 42 | # The name of the service account to use. 43 | # If not set and create is true, a name is generated using the fullname template. 44 | name: "" 45 | 46 | service: 47 | type: ClusterIP 48 | port: 9710 49 | 50 | container: 51 | port: 9710 52 | 53 | # Container [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container). 54 | # See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#security-context-1) for details. 55 | securityContext: 56 | readOnlyRootFilesystem: true 57 | runAsNonRoot: true 58 | runAsUser: 1000 59 | 60 | # Container resource [requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). 61 | # See the [API reference](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources) for details. 62 | # @default -- No requests or limits. 63 | resources: {} 64 | # requests: 65 | # cpu: 100m 66 | # memory: 128Mi 67 | # limits: 68 | # cpu: 100m 69 | # memory: 128Mi 70 | 71 | ### Monitoring 72 | ############### 73 | monitoring: 74 | # Enable Prometheus PodMonitor to monitor the operator. 75 | enabled: false 76 | serviceMonitor: false 77 | serviceAnnotations: {} 78 | prometheus: 79 | name: unknown 80 | 81 | # Annotations to be added to pods and deployments. 82 | annotations: {} 83 | 84 | nodeSelector: {} 85 | 86 | tolerations: [] 87 | 88 | affinity: {} 89 | 90 | # CRDs configuration 91 | crds: 92 | # -- Additional CRDs annotations 93 | annotations: {} 94 | # argocd.argoproj.io/sync-options: Replace=true 95 | # strategy.spinnaker.io/replace: 'true' 96 | 97 | priorityClassName: "" 98 | 99 | -------------------------------------------------------------------------------- /charts/redisoperator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | 2 | {{/* vim: set filetype=mustache: */}} 3 | {{/* 4 | Expand the name of the chart. 5 | */}} 6 | {{- define "chart.name" -}} 7 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 8 | {{- end -}} 9 | 10 | {{/* 11 | Create a default fully qualified app name. 12 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 13 | If release name contains chart name it will be used as a full name. 14 | */}} 15 | {{- define "chart.fullname" -}} 16 | {{- if .Values.fullnameOverride -}} 17 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 18 | {{- else -}} 19 | {{- $name := default .Chart.Name .Values.nameOverride -}} 20 | {{- if contains $name .Release.Name -}} 21 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 22 | {{- else -}} 23 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 24 | {{- end -}} 25 | {{- end -}} 26 | {{- end -}} 27 | 28 | {{/* 29 | Create chart name and version as used by the chart label. 
30 | */}} 31 | {{- define "chart.chart" -}} 32 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 33 | {{- end -}} 34 | 35 | 36 | {{/* 37 | Common labels 38 | timestamp: {{ now | date "2006-01-02_15-04-05" | quote }} 39 | */}} 40 | {{- define "chart.labels" -}} 41 | helm.sh/chart: {{ include "chart.chart" . }} 42 | {{ include "chart.selectorLabels" . }} 43 | {{- if .Chart.AppVersion }} 44 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 45 | {{- end }} 46 | app.kubernetes.io/managed-by: {{ .Release.Service }} 47 | app.kubernetes.io/part-of: {{ include "chart.name" . }} 48 | {{- if .Values.labels}} 49 | {{ toYaml .Values.labels }} 50 | {{- end }} 51 | {{- end -}} 52 | 53 | {{/* 54 | Selector labels 55 | */}} 56 | {{- define "chart.selectorLabels" -}} 57 | app.kubernetes.io/name: {{ include "chart.name" . }}{{- if .name -}}-{{- .name -}}{{- end }} 58 | app.kubernetes.io/instance: {{ .Release.Name }} 59 | {{- end -}} 60 | 61 | 62 | {{/* 63 | Create the name of the service account to use 64 | */}} 65 | {{- define "chart.serviceAccountName" -}} 66 | {{- if .Values.serviceAccount.create -}} 67 | {{ default (include "chart.fullname" .) .Values.serviceAccount.name }} 68 | {{- else -}} 69 | {{ default "default" .Values.serviceAccount.name }} 70 | {{- end -}} 71 | {{- end -}} 72 | 73 | {{/* Expands data for image pull secret. */}} 74 | {{- define "imagePullSecret" }} 75 | {{- with .Values.imageCredentials }} 76 | {{- printf "{\"auths\":{\"%s\":{\"username\":\"%s\",\"password\":\"%s\",\"email\":\"%s\",\"auth\":\"%s\"}}}" .registry .username .password .email (printf "%s:%s" .username .password | b64enc) | b64enc }} 77 | {{- end }} 78 | {{- end }} 79 | 80 | {{/* 81 | Create the name of the namespace 82 | */}} 83 | {{- define "chart.namespaceName" -}} 84 | {{- default .Release.Namespace .Values.namespace }} 85 | {{- end }} 86 | -------------------------------------------------------------------------------- /mocks/operator/redisfailover/RedisFailover.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery v2.20.0. DO NOT EDIT. 
2 | 3 | package mocks 4 | 5 | import ( 6 | context "context" 7 | 8 | mock "github.com/stretchr/testify/mock" 9 | 10 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 11 | 12 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | 14 | watch "k8s.io/apimachinery/pkg/watch" 15 | ) 16 | 17 | // RedisFailover is an autogenerated mock type for the RedisFailover type 18 | type RedisFailover struct { 19 | mock.Mock 20 | } 21 | 22 | // ListRedisFailovers provides a mock function with given fields: ctx, namespace, opts 23 | func (_m *RedisFailover) ListRedisFailovers(ctx context.Context, namespace string, opts v1.ListOptions) (*redisfailoverv1.RedisFailoverList, error) { 24 | ret := _m.Called(ctx, namespace, opts) 25 | 26 | var r0 *redisfailoverv1.RedisFailoverList 27 | var r1 error 28 | if rf, ok := ret.Get(0).(func(context.Context, string, v1.ListOptions) (*redisfailoverv1.RedisFailoverList, error)); ok { 29 | return rf(ctx, namespace, opts) 30 | } 31 | if rf, ok := ret.Get(0).(func(context.Context, string, v1.ListOptions) *redisfailoverv1.RedisFailoverList); ok { 32 | r0 = rf(ctx, namespace, opts) 33 | } else { 34 | if ret.Get(0) != nil { 35 | r0 = ret.Get(0).(*redisfailoverv1.RedisFailoverList) 36 | } 37 | } 38 | 39 | if rf, ok := ret.Get(1).(func(context.Context, string, v1.ListOptions) error); ok { 40 | r1 = rf(ctx, namespace, opts) 41 | } else { 42 | r1 = ret.Error(1) 43 | } 44 | 45 | return r0, r1 46 | } 47 | 48 | // WatchRedisFailovers provides a mock function with given fields: ctx, namespace, opts 49 | func (_m *RedisFailover) WatchRedisFailovers(ctx context.Context, namespace string, opts v1.ListOptions) (watch.Interface, error) { 50 | ret := _m.Called(ctx, namespace, opts) 51 | 52 | var r0 watch.Interface 53 | var r1 error 54 | if rf, ok := ret.Get(0).(func(context.Context, string, v1.ListOptions) (watch.Interface, error)); ok { 55 | return rf(ctx, namespace, opts) 56 | } 57 | if rf, ok := ret.Get(0).(func(context.Context, string, v1.ListOptions) watch.Interface); ok { 58 | r0 = rf(ctx, namespace, opts) 59 | } else { 60 | if ret.Get(0) != nil { 61 | r0 = ret.Get(0).(watch.Interface) 62 | } 63 | } 64 | 65 | if rf, ok := ret.Get(1).(func(context.Context, string, v1.ListOptions) error); ok { 66 | r1 = rf(ctx, namespace, opts) 67 | } else { 68 | r1 = ret.Error(1) 69 | } 70 | 71 | return r0, r1 72 | } 73 | 74 | type mockConstructorTestingTNewRedisFailover interface { 75 | mock.TestingT 76 | Cleanup(func()) 77 | } 78 | 79 | // NewRedisFailover creates a new instance of RedisFailover. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 80 | func NewRedisFailover(t mockConstructorTestingTNewRedisFailover) *RedisFailover { 81 | mock := &RedisFailover{} 82 | mock.Mock.Test(t) 83 | 84 | t.Cleanup(func() { mock.AssertExpectations(t) }) 85 | 86 | return mock 87 | } 88 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/typed/redisfailover/v1/redisfailover_client.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 
2 | 3 | package v1 4 | 5 | import ( 6 | "net/http" 7 | 8 | v1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 9 | "github.com/spotahome/redis-operator/client/k8s/clientset/versioned/scheme" 10 | rest "k8s.io/client-go/rest" 11 | ) 12 | 13 | type DatabasesV1Interface interface { 14 | RESTClient() rest.Interface 15 | RedisFailoversGetter 16 | } 17 | 18 | // DatabasesV1Client is used to interact with features provided by the databases.spotahome.com group. 19 | type DatabasesV1Client struct { 20 | restClient rest.Interface 21 | } 22 | 23 | func (c *DatabasesV1Client) RedisFailovers(namespace string) RedisFailoverInterface { 24 | return newRedisFailovers(c, namespace) 25 | } 26 | 27 | // NewForConfig creates a new DatabasesV1Client for the given config. 28 | // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), 29 | // where httpClient was generated with rest.HTTPClientFor(c). 30 | func NewForConfig(c *rest.Config) (*DatabasesV1Client, error) { 31 | config := *c 32 | if err := setConfigDefaults(&config); err != nil { 33 | return nil, err 34 | } 35 | httpClient, err := rest.HTTPClientFor(&config) 36 | if err != nil { 37 | return nil, err 38 | } 39 | return NewForConfigAndClient(&config, httpClient) 40 | } 41 | 42 | // NewForConfigAndClient creates a new DatabasesV1Client for the given config and http client. 43 | // Note the http client provided takes precedence over the configured transport values. 44 | func NewForConfigAndClient(c *rest.Config, h *http.Client) (*DatabasesV1Client, error) { 45 | config := *c 46 | if err := setConfigDefaults(&config); err != nil { 47 | return nil, err 48 | } 49 | client, err := rest.RESTClientForConfigAndClient(&config, h) 50 | if err != nil { 51 | return nil, err 52 | } 53 | return &DatabasesV1Client{client}, nil 54 | } 55 | 56 | // NewForConfigOrDie creates a new DatabasesV1Client for the given config and 57 | // panics if there is an error in the config. 58 | func NewForConfigOrDie(c *rest.Config) *DatabasesV1Client { 59 | client, err := NewForConfig(c) 60 | if err != nil { 61 | panic(err) 62 | } 63 | return client 64 | } 65 | 66 | // New creates a new DatabasesV1Client for the given RESTClient. 67 | func New(c rest.Interface) *DatabasesV1Client { 68 | return &DatabasesV1Client{c} 69 | } 70 | 71 | func setConfigDefaults(config *rest.Config) error { 72 | gv := v1.SchemeGroupVersion 73 | config.GroupVersion = &gv 74 | config.APIPath = "/apis" 75 | config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() 76 | 77 | if config.UserAgent == "" { 78 | config.UserAgent = rest.DefaultKubernetesUserAgent() 79 | } 80 | 81 | return nil 82 | } 83 | 84 | // RESTClient returns a RESTClient that is used to communicate 85 | // with API server by this client implementation. 
86 | func (c *DatabasesV1Client) RESTClient() rest.Interface { 87 | if c == nil { 88 | return nil 89 | } 90 | return c.restClient 91 | } 92 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/spotahome/redis-operator 2 | 3 | go 1.20 4 | 5 | require ( 6 | github.com/go-redis/redis/v8 v8.11.5 7 | github.com/prometheus/client_golang v1.16.0 8 | github.com/sirupsen/logrus v1.9.3 9 | github.com/spotahome/kooper/v2 v2.4.0 10 | github.com/stretchr/testify v1.8.4 11 | k8s.io/api v0.27.3 12 | k8s.io/apiextensions-apiserver v0.24.4 13 | k8s.io/apimachinery v0.27.3 14 | k8s.io/client-go v0.27.3 15 | ) 16 | 17 | require ( 18 | github.com/beorn7/perks v1.0.1 // indirect 19 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 20 | github.com/davecgh/go-spew v1.1.1 // indirect 21 | github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect 22 | github.com/emicklei/go-restful/v3 v3.9.0 // indirect 23 | github.com/evanphx/json-patch v4.12.0+incompatible // indirect 24 | github.com/go-logr/logr v1.2.3 // indirect 25 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 26 | github.com/go-openapi/jsonreference v0.20.1 // indirect 27 | github.com/go-openapi/swag v0.22.3 // indirect 28 | github.com/gogo/protobuf v1.3.2 // indirect 29 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 30 | github.com/golang/protobuf v1.5.3 // indirect 31 | github.com/google/gnostic v0.5.7-v3refs // indirect 32 | github.com/google/go-cmp v0.5.9 // indirect 33 | github.com/google/gofuzz v1.2.0 // indirect 34 | github.com/google/uuid v1.3.0 // indirect 35 | github.com/imdario/mergo v0.3.12 // indirect 36 | github.com/josharian/intern v1.0.0 // indirect 37 | github.com/json-iterator/go v1.1.12 // indirect 38 | github.com/mailru/easyjson v0.7.7 // indirect 39 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 40 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 41 | github.com/modern-go/reflect2 v1.0.2 // indirect 42 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 43 | github.com/pkg/errors v0.9.1 // indirect 44 | github.com/pmezard/go-difflib v1.0.0 // indirect 45 | github.com/prometheus/client_model v0.3.0 // indirect 46 | github.com/prometheus/common v0.42.0 // indirect 47 | github.com/prometheus/procfs v0.10.1 // indirect 48 | github.com/spf13/pflag v1.0.5 // indirect 49 | github.com/stretchr/objx v0.5.0 // indirect 50 | golang.org/x/net v0.8.0 // indirect 51 | golang.org/x/oauth2 v0.5.0 // indirect 52 | golang.org/x/sys v0.8.0 // indirect 53 | golang.org/x/term v0.6.0 // indirect 54 | golang.org/x/text v0.8.0 // indirect 55 | golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect 56 | google.golang.org/appengine v1.6.7 // indirect 57 | google.golang.org/protobuf v1.30.0 // indirect 58 | gopkg.in/inf.v0 v0.9.1 // indirect 59 | gopkg.in/yaml.v2 v2.4.0 // indirect 60 | gopkg.in/yaml.v3 v3.0.1 // indirect 61 | k8s.io/klog/v2 v2.90.1 // indirect 62 | k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect 63 | k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect 64 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 65 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 66 | sigs.k8s.io/yaml v1.3.0 // indirect 67 | ) 68 | -------------------------------------------------------------------------------- 
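Before moving on to the Helm deployment template, a short usage sketch of the generated client may help. It assumes a reachable cluster and a kubeconfig at the default location (both assumptions, not part of this repository) and combines the versioned clientset's `NewForConfig` (defined in `clientset.go`, shown further below) with the `DatabasesV1().RedisFailovers(...).List(...)` calls already used by the operator's services above.

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	redisfailoverclientset "github.com/spotahome/redis-operator/client/k8s/clientset/versioned"
)

func main() {
	// Sketch only: load the kubeconfig from its default path (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}

	// Build the typed clientset for the databases.spotahome.com group.
	cli, err := redisfailoverclientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List the RedisFailovers in the "default" namespace.
	rfs, err := cli.DatabasesV1().RedisFailovers("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, rf := range rfs.Items {
		fmt.Println(rf.Namespace, rf.Name)
	}
}
```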
/charts/redisoperator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | {{- $fullName := include "chart.fullname" . -}} 2 | {{ $name := "registry" }} 3 | {{- $data := dict "Chart" .Chart "Release" .Release "Values" .Values -}} 4 | apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} 5 | kind: Deployment 6 | metadata: 7 | name: {{ $fullName }} 8 | namespace: {{ include "chart.namespaceName" . }} 9 | labels: 10 | {{- include "chart.labels" $data | nindent 4 }} 11 | {{- if .Values.annotations }} 12 | annotations: 13 | {{ toYaml .Values.annotations | indent 4 }} 14 | {{- end }} 15 | spec: 16 | replicas: {{ .Values.replicas }} 17 | selector: 18 | matchLabels: 19 | {{- include "chart.selectorLabels" $data | nindent 6 }} 20 | strategy: 21 | type: {{ .Values.updateStrategy.type }} 22 | template: 23 | metadata: 24 | {{- with .Values.annotations }} 25 | annotations: 26 | {{- toYaml . | nindent 8 }} 27 | {{- end }} 28 | labels: 29 | {{- include "chart.selectorLabels" $data | nindent 8 }} 30 | spec: 31 | serviceAccountName: {{ template "chart.serviceAccountName" . }} 32 | {{- if (and .Values.imageCredentials.create (not .Values.imageCredentials.existsSecrets)) }} 33 | imagePullSecrets: 34 | - name: {{ $fullName }}-{{ $name }} 35 | {{- else if (and .Values.imageCredentials.create .Values.imageCredentials.existsSecrets) }} 36 | {{- range .Values.imageCredentials.existsSecrets }} 37 | imagePullSecrets: 38 | {{ printf "- name: %s" . }} 39 | {{- end }} 40 | {{- end }} 41 | containers: 42 | - name: {{ .Chart.Name }} 43 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion}}" 44 | {{- if .Values.image.cli_args }} 45 | args: 46 | - {{ quote .Values.image.cli_args }} 47 | {{- end }} 48 | imagePullPolicy: {{ .Values.image.pullPolicy }} 49 | ports: 50 | - name: metrics 51 | containerPort: {{ .Values.container.port }} 52 | protocol: TCP 53 | readinessProbe: 54 | tcpSocket: 55 | port: {{ .Values.container.port }} 56 | initialDelaySeconds: 10 57 | periodSeconds: 3 58 | timeoutSeconds: 3 59 | livenessProbe: 60 | tcpSocket: 61 | port: {{ .Values.container.port }} 62 | initialDelaySeconds: 30 63 | periodSeconds: 5 64 | timeoutSeconds: 5 65 | failureThreshold: 6 66 | successThreshold: 1 67 | securityContext: 68 | {{- toYaml .Values.securityContext | nindent 12 }} 69 | resources: 70 | {{- toYaml .Values.resources | nindent 12 }} 71 | {{- with .Values.affinity }} 72 | affinity: 73 | {{- toYaml . | nindent 8 }} 74 | {{- end }} 75 | {{- with .Values.tolerations }} 76 | tolerations: 77 | {{- toYaml . | nindent 8 }} 78 | {{- end }} 79 | {{- with .Values.nodeSelector }} 80 | nodeSelector: 81 | {{- toYaml . | nindent 8 }} 82 | {{- end }} 83 | {{- if .Values.priorityClassName }} 84 | priorityClassName: {{ .Values.priorityClassName }} 85 | {{- end }} 86 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/clientset.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 
2 | 3 | package versioned 4 | 5 | import ( 6 | "fmt" 7 | "net/http" 8 | 9 | databasesv1 "github.com/spotahome/redis-operator/client/k8s/clientset/versioned/typed/redisfailover/v1" 10 | discovery "k8s.io/client-go/discovery" 11 | rest "k8s.io/client-go/rest" 12 | flowcontrol "k8s.io/client-go/util/flowcontrol" 13 | ) 14 | 15 | type Interface interface { 16 | Discovery() discovery.DiscoveryInterface 17 | DatabasesV1() databasesv1.DatabasesV1Interface 18 | } 19 | 20 | // Clientset contains the clients for groups. 21 | type Clientset struct { 22 | *discovery.DiscoveryClient 23 | databasesV1 *databasesv1.DatabasesV1Client 24 | } 25 | 26 | // DatabasesV1 retrieves the DatabasesV1Client 27 | func (c *Clientset) DatabasesV1() databasesv1.DatabasesV1Interface { 28 | return c.databasesV1 29 | } 30 | 31 | // Discovery retrieves the DiscoveryClient 32 | func (c *Clientset) Discovery() discovery.DiscoveryInterface { 33 | if c == nil { 34 | return nil 35 | } 36 | return c.DiscoveryClient 37 | } 38 | 39 | // NewForConfig creates a new Clientset for the given config. 40 | // If config's RateLimiter is not set and QPS and Burst are acceptable, 41 | // NewForConfig will generate a rate-limiter in configShallowCopy. 42 | // NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), 43 | // where httpClient was generated with rest.HTTPClientFor(c). 44 | func NewForConfig(c *rest.Config) (*Clientset, error) { 45 | configShallowCopy := *c 46 | 47 | if configShallowCopy.UserAgent == "" { 48 | configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() 49 | } 50 | 51 | // share the transport between all clients 52 | httpClient, err := rest.HTTPClientFor(&configShallowCopy) 53 | if err != nil { 54 | return nil, err 55 | } 56 | 57 | return NewForConfigAndClient(&configShallowCopy, httpClient) 58 | } 59 | 60 | // NewForConfigAndClient creates a new Clientset for the given config and http client. 61 | // Note the http client provided takes precedence over the configured transport values. 62 | // If config's RateLimiter is not set and QPS and Burst are acceptable, 63 | // NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 64 | func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { 65 | configShallowCopy := *c 66 | if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { 67 | if configShallowCopy.Burst <= 0 { 68 | return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") 69 | } 70 | configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) 71 | } 72 | 73 | var cs Clientset 74 | var err error 75 | cs.databasesV1, err = databasesv1.NewForConfigAndClient(&configShallowCopy, httpClient) 76 | if err != nil { 77 | return nil, err 78 | } 79 | 80 | cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) 81 | if err != nil { 82 | return nil, err 83 | } 84 | return &cs, nil 85 | } 86 | 87 | // NewForConfigOrDie creates a new Clientset for the given config and 88 | // panics if there is an error in the config. 89 | func NewForConfigOrDie(c *rest.Config) *Clientset { 90 | cs, err := NewForConfig(c) 91 | if err != nil { 92 | panic(err) 93 | } 94 | return cs 95 | } 96 | 97 | // New creates a new Clientset for the given RESTClient. 
98 | func New(c rest.Interface) *Clientset { 99 | var cs Clientset 100 | cs.databasesV1 = databasesv1.New(c) 101 | 102 | cs.DiscoveryClient = discovery.NewDiscoveryClient(c) 103 | return &cs 104 | } 105 | -------------------------------------------------------------------------------- /example/operator/all-redis-operator-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: redisoperator 6 | name: redisoperator 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: redisoperator 12 | strategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | labels: 17 | app: redisoperator 18 | spec: 19 | serviceAccountName: redisoperator 20 | containers: 21 | - image: quay.io/spotahome/redis-operator:latest 22 | imagePullPolicy: IfNotPresent 23 | name: app 24 | securityContext: 25 | readOnlyRootFilesystem: true 26 | runAsNonRoot: true 27 | runAsUser: 1000 28 | resources: 29 | limits: 30 | cpu: 100m 31 | memory: 50Mi 32 | requests: 33 | cpu: 10m 34 | memory: 50Mi 35 | restartPolicy: Always 36 | --- 37 | apiVersion: rbac.authorization.k8s.io/v1 38 | kind: ClusterRoleBinding 39 | metadata: 40 | name: redisoperator 41 | roleRef: 42 | apiGroup: rbac.authorization.k8s.io 43 | kind: ClusterRole 44 | name: redisoperator 45 | subjects: 46 | - kind: ServiceAccount 47 | name: redisoperator 48 | namespace: default 49 | --- 50 | apiVersion: rbac.authorization.k8s.io/v1 51 | kind: ClusterRole 52 | metadata: 53 | name: redisoperator 54 | rules: 55 | - apiGroups: 56 | - databases.spotahome.com 57 | resources: 58 | - redisfailovers 59 | - redisfailovers/finalizers 60 | verbs: 61 | - "*" 62 | - apiGroups: 63 | - apiextensions.k8s.io 64 | resources: 65 | - customresourcedefinitions 66 | verbs: 67 | - "*" 68 | - apiGroups: 69 | - "" 70 | resources: 71 | - pods 72 | - services 73 | - endpoints 74 | - events 75 | - configmaps 76 | - persistentvolumeclaims 77 | - persistentvolumeclaims/finalizers 78 | verbs: 79 | - "*" 80 | - apiGroups: 81 | - "" 82 | resources: 83 | - secrets 84 | verbs: 85 | - "get" 86 | - apiGroups: 87 | - apps 88 | resources: 89 | - deployments 90 | - statefulsets 91 | verbs: 92 | - "*" 93 | - apiGroups: 94 | - policy 95 | resources: 96 | - poddisruptionbudgets 97 | verbs: 98 | - "*" 99 | - apiGroups: 100 | - coordination.k8s.io 101 | resources: 102 | - leases 103 | verbs: 104 | - "*" 105 | 106 | --- 107 | apiVersion: v1 108 | kind: ServiceAccount 109 | metadata: 110 | name: redisoperator 111 | --- 112 | 113 | apiVersion: v1 114 | kind: Service 115 | metadata: 116 | annotations: 117 | prometheus.io/path: /metrics 118 | prometheus.io/port: http 119 | prometheus.io/scrape: "true" 120 | name: redisoperator 121 | labels: 122 | app: redisoperator 123 | spec: 124 | type: ClusterIP 125 | ports: 126 | - name: metrics 127 | port: 9710 128 | protocol: TCP 129 | targetPort: metrics 130 | selector: 131 | app: redisoperator 132 | --- 133 | 134 | apiVersion: monitoring.coreos.com/v1 135 | kind: ServiceMonitor 136 | metadata: 137 | name: redis-operator-metrics 138 | labels: 139 | app: redisoperator 140 | release: prometheus 141 | spec: 142 | selector: 143 | matchLabels: 144 | app: redisoperator 145 | endpoints: 146 | - port: metrics 147 | namespaceSelector: 148 | matchNames: 149 | - default 150 | --- 151 | 152 | 153 | apiVersion: monitoring.coreos.com/v1 154 | kind: PodMonitor 155 | metadata: 156 | name: redisoperator 157 | labels: 158 | app: redisoperator 159 | release: 
prometheus 160 | spec: 161 | selector: 162 | matchLabels: 163 | app: redisoperator 164 | podMetricsEndpoints: 165 | - port: metrics 166 | -------------------------------------------------------------------------------- /service/k8s/pod_test.go: -------------------------------------------------------------------------------- 1 | package k8s_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | corev1 "k8s.io/api/core/v1" 9 | kubeerrors "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | kubernetes "k8s.io/client-go/kubernetes/fake" 14 | kubetesting "k8s.io/client-go/testing" 15 | 16 | "github.com/spotahome/redis-operator/log" 17 | "github.com/spotahome/redis-operator/metrics" 18 | "github.com/spotahome/redis-operator/service/k8s" 19 | ) 20 | 21 | var ( 22 | podsGroup = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"} 23 | ) 24 | 25 | func newPodUpdateAction(ns string, pod *corev1.Pod) kubetesting.UpdateActionImpl { 26 | return kubetesting.NewUpdateAction(podsGroup, ns, pod) 27 | } 28 | 29 | func newPodGetAction(ns, name string) kubetesting.GetActionImpl { 30 | return kubetesting.NewGetAction(podsGroup, ns, name) 31 | } 32 | 33 | func newPodCreateAction(ns string, pod *corev1.Pod) kubetesting.CreateActionImpl { 34 | return kubetesting.NewCreateAction(podsGroup, ns, pod) 35 | } 36 | 37 | func TestPodServiceGetCreateOrUpdate(t *testing.T) { 38 | testPod := &corev1.Pod{ 39 | ObjectMeta: metav1.ObjectMeta{ 40 | Name: "testpod1", 41 | ResourceVersion: "10", 42 | }, 43 | } 44 | 45 | testns := "testns" 46 | 47 | tests := []struct { 48 | name string 49 | pod *corev1.Pod 50 | getPodResult *corev1.Pod 51 | errorOnGet error 52 | errorOnCreation error 53 | expActions []kubetesting.Action 54 | expErr bool 55 | }{ 56 | { 57 | name: "A new pod should create a new pod.", 58 | pod: testPod, 59 | getPodResult: nil, 60 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 61 | errorOnCreation: nil, 62 | expActions: []kubetesting.Action{ 63 | newPodGetAction(testns, testPod.ObjectMeta.Name), 64 | newPodCreateAction(testns, testPod), 65 | }, 66 | expErr: false, 67 | }, 68 | { 69 | name: "A new pod should error when create a new pod fails.", 70 | pod: testPod, 71 | getPodResult: nil, 72 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 73 | errorOnCreation: errors.New("wanted error"), 74 | expActions: []kubetesting.Action{ 75 | newPodGetAction(testns, testPod.ObjectMeta.Name), 76 | newPodCreateAction(testns, testPod), 77 | }, 78 | expErr: true, 79 | }, 80 | { 81 | name: "An existent pod should update the pod.", 82 | pod: testPod, 83 | getPodResult: testPod, 84 | errorOnGet: nil, 85 | errorOnCreation: nil, 86 | expActions: []kubetesting.Action{ 87 | newPodGetAction(testns, testPod.ObjectMeta.Name), 88 | newPodUpdateAction(testns, testPod), 89 | }, 90 | expErr: false, 91 | }, 92 | } 93 | 94 | for _, test := range tests { 95 | t.Run(test.name, func(t *testing.T) { 96 | assert := assert.New(t) 97 | 98 | // Mock. 
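// The fake clientset (k8s.io/client-go/kubernetes/fake) records every API call and lets
// reactors intercept them: the "get" reactor below returns test.getPodResult and
// test.errorOnGet, while the "create" reactor returns test.errorOnCreation, which is
// what steers CreateOrUpdatePod down either the create path or the update path.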
99 | mcli := &kubernetes.Clientset{} 100 | mcli.AddReactor("get", "pods", func(action kubetesting.Action) (bool, runtime.Object, error) { 101 | return true, test.getPodResult, test.errorOnGet 102 | }) 103 | mcli.AddReactor("create", "pods", func(action kubetesting.Action) (bool, runtime.Object, error) { 104 | return true, nil, test.errorOnCreation 105 | }) 106 | 107 | service := k8s.NewPodService(mcli, log.Dummy, metrics.Dummy) 108 | err := service.CreateOrUpdatePod(testns, test.pod) 109 | 110 | if test.expErr { 111 | assert.Error(err) 112 | } else { 113 | assert.NoError(err) 114 | // Check calls to kubernetes. 115 | assert.Equal(test.expActions, mcli.Actions()) 116 | } 117 | }) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /service/k8s/service_test.go: -------------------------------------------------------------------------------- 1 | package k8s_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | corev1 "k8s.io/api/core/v1" 9 | kubeerrors "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | kubernetes "k8s.io/client-go/kubernetes/fake" 14 | kubetesting "k8s.io/client-go/testing" 15 | 16 | "github.com/spotahome/redis-operator/log" 17 | "github.com/spotahome/redis-operator/metrics" 18 | "github.com/spotahome/redis-operator/service/k8s" 19 | ) 20 | 21 | var ( 22 | servicesGroup = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"} 23 | ) 24 | 25 | func newServiceUpdateAction(ns string, service *corev1.Service) kubetesting.UpdateActionImpl { 26 | return kubetesting.NewUpdateAction(servicesGroup, ns, service) 27 | } 28 | 29 | func newServiceGetAction(ns, name string) kubetesting.GetActionImpl { 30 | return kubetesting.NewGetAction(servicesGroup, ns, name) 31 | } 32 | 33 | func newServiceCreateAction(ns string, service *corev1.Service) kubetesting.CreateActionImpl { 34 | return kubetesting.NewCreateAction(servicesGroup, ns, service) 35 | } 36 | 37 | func TestServiceServiceGetCreateOrUpdate(t *testing.T) { 38 | testService := &corev1.Service{ 39 | ObjectMeta: metav1.ObjectMeta{ 40 | Name: "testservice1", 41 | ResourceVersion: "10", 42 | }, 43 | } 44 | 45 | testns := "testns" 46 | 47 | tests := []struct { 48 | name string 49 | service *corev1.Service 50 | getServiceResult *corev1.Service 51 | errorOnGet error 52 | errorOnCreation error 53 | expActions []kubetesting.Action 54 | expErr bool 55 | }{ 56 | { 57 | name: "A new service should create a new service.", 58 | service: testService, 59 | getServiceResult: nil, 60 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 61 | errorOnCreation: nil, 62 | expActions: []kubetesting.Action{ 63 | newServiceGetAction(testns, testService.ObjectMeta.Name), 64 | newServiceCreateAction(testns, testService), 65 | }, 66 | expErr: false, 67 | }, 68 | { 69 | name: "A new service should error when create a new service fails.", 70 | service: testService, 71 | getServiceResult: nil, 72 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 73 | errorOnCreation: errors.New("wanted error"), 74 | expActions: []kubetesting.Action{ 75 | newServiceGetAction(testns, testService.ObjectMeta.Name), 76 | newServiceCreateAction(testns, testService), 77 | }, 78 | expErr: true, 79 | }, 80 | { 81 | name: "An existent service should update the service.", 82 | service: testService, 83 | getServiceResult: 
testService, 84 | errorOnGet: nil, 85 | errorOnCreation: nil, 86 | expActions: []kubetesting.Action{ 87 | newServiceGetAction(testns, testService.ObjectMeta.Name), 88 | newServiceUpdateAction(testns, testService), 89 | }, 90 | expErr: false, 91 | }, 92 | } 93 | 94 | for _, test := range tests { 95 | t.Run(test.name, func(t *testing.T) { 96 | assert := assert.New(t) 97 | 98 | // Mock. 99 | mcli := &kubernetes.Clientset{} 100 | mcli.AddReactor("get", "services", func(action kubetesting.Action) (bool, runtime.Object, error) { 101 | return true, test.getServiceResult, test.errorOnGet 102 | }) 103 | mcli.AddReactor("create", "services", func(action kubetesting.Action) (bool, runtime.Object, error) { 104 | return true, nil, test.errorOnCreation 105 | }) 106 | 107 | service := k8s.NewServiceService(mcli, log.Dummy, metrics.Dummy) 108 | err := service.CreateOrUpdateService(testns, test.service) 109 | 110 | if test.expErr { 111 | assert.Error(err) 112 | } else { 113 | assert.NoError(err) 114 | // Check calls to kubernetes. 115 | assert.Equal(test.expActions, mcli.Actions()) 116 | } 117 | }) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /operator/redisfailover/factory.go: -------------------------------------------------------------------------------- 1 | package redisfailover 2 | 3 | import ( 4 | "context" 5 | "regexp" 6 | "time" 7 | 8 | "github.com/spotahome/kooper/v2/controller" 9 | "github.com/spotahome/kooper/v2/controller/leaderelection" 10 | kooperlog "github.com/spotahome/kooper/v2/log" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/watch" 14 | "k8s.io/client-go/kubernetes" 15 | "k8s.io/client-go/tools/cache" 16 | 17 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 18 | "github.com/spotahome/redis-operator/log" 19 | "github.com/spotahome/redis-operator/metrics" 20 | rfservice "github.com/spotahome/redis-operator/operator/redisfailover/service" 21 | "github.com/spotahome/redis-operator/service/k8s" 22 | "github.com/spotahome/redis-operator/service/redis" 23 | ) 24 | 25 | const ( 26 | resync = 30 * time.Second 27 | operatorName = "redis-operator" 28 | lockKey = "redis-failover-lease" 29 | ) 30 | 31 | // New will create an operator that is responsible of managing all the required stuff 32 | // to create redis failovers. 33 | func New(cfg Config, k8sService k8s.Services, k8sClient kubernetes.Interface, lockNamespace string, redisClient redis.Client, kooperMetricsRecorder metrics.Recorder, logger log.Logger) (controller.Controller, error) { 34 | // Create internal services. 35 | rfService := rfservice.NewRedisFailoverKubeClient(k8sService, logger, kooperMetricsRecorder) 36 | rfChecker := rfservice.NewRedisFailoverChecker(k8sService, redisClient, logger, kooperMetricsRecorder) 37 | rfHealer := rfservice.NewRedisFailoverHealer(k8sService, redisClient, logger) 38 | 39 | // Create the handlers. 40 | rfHandler := NewRedisFailoverHandler(cfg, rfService, rfChecker, rfHealer, k8sService, kooperMetricsRecorder, logger) 41 | rfRetriever := NewRedisFailoverRetriever(cfg, k8sService) 42 | 43 | kooperLogger := kooperlogger{Logger: logger.WithField("operator", "redisfailover")} 44 | // Leader election service. 45 | leSVC, err := leaderelection.NewDefault(lockKey, lockNamespace, k8sClient, kooperLogger) 46 | if err != nil { 47 | return nil, err 48 | } 49 | 50 | // Create our controller. 
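// The kooper controller ties the pieces together: the Retriever lists and watches
// RedisFailovers (filtered by the supported-namespaces regex), the Handler reconciles
// each retrieved object, the LeaderElector keeps a single operator replica active, and
// ResyncInterval (the resync constant above, 30s) periodically re-queues every object
// so drift is corrected even without watch events.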
51 | return controller.New(&controller.Config{ 52 | Handler: rfHandler, 53 | Retriever: rfRetriever, 54 | LeaderElector: leSVC, 55 | MetricsRecorder: kooperMetricsRecorder, 56 | Logger: kooperLogger, 57 | Name: "redisfailover", 58 | ResyncInterval: resync, 59 | ConcurrentWorkers: cfg.Concurrency, 60 | }) 61 | } 62 | 63 | func NewRedisFailoverRetriever(cfg Config, cli k8s.Services) controller.Retriever { 64 | isNamespaceSupported := func(rf redisfailoverv1.RedisFailover) bool { 65 | match, _ := regexp.Match(cfg.SupportedNamespacesRegex, []byte(rf.Namespace)) 66 | return match 67 | } 68 | // check in the startup whether the regex compiles 69 | 70 | return controller.MustRetrieverFromListerWatcher(&cache.ListWatch{ 71 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { 72 | rfList, err := cli.ListRedisFailovers(context.Background(), "", options) 73 | if err != nil { 74 | return rfList, err 75 | } 76 | 77 | targetRFList := make([]redisfailoverv1.RedisFailover, 0) 78 | for _, rf := range rfList.Items { 79 | if isNamespaceSupported(rf) { 80 | targetRFList = append(targetRFList, rf) 81 | } 82 | } 83 | rfList.Items = targetRFList 84 | 85 | return rfList, err 86 | }, 87 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 88 | watcher, err := cli.WatchRedisFailovers(context.Background(), "", options) 89 | watcher = watch.Filter(watcher, func(event watch.Event) (watch.Event, bool) { 90 | rf, ok := event.Object.(*redisfailoverv1.RedisFailover) 91 | if !ok { 92 | return event, false 93 | } 94 | return event, isNamespaceSupported(*rf) 95 | }) 96 | return watcher, err 97 | }, 98 | }) 99 | } 100 | 101 | type kooperlogger struct { 102 | log.Logger 103 | } 104 | 105 | func (k kooperlogger) WithKV(kv kooperlog.KV) kooperlog.Logger { 106 | return kooperlogger{Logger: k.Logger.WithFields(kv)} 107 | } 108 | -------------------------------------------------------------------------------- /service/k8s/configmap_test.go: -------------------------------------------------------------------------------- 1 | package k8s_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | corev1 "k8s.io/api/core/v1" 9 | kubeerrors "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | kubernetes "k8s.io/client-go/kubernetes/fake" 14 | kubetesting "k8s.io/client-go/testing" 15 | 16 | "github.com/spotahome/redis-operator/log" 17 | "github.com/spotahome/redis-operator/metrics" 18 | "github.com/spotahome/redis-operator/service/k8s" 19 | ) 20 | 21 | var ( 22 | configMapsGroup = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} 23 | ) 24 | 25 | func newConfigMapUpdateAction(ns string, configMap *corev1.ConfigMap) kubetesting.UpdateActionImpl { 26 | return kubetesting.NewUpdateAction(configMapsGroup, ns, configMap) 27 | } 28 | 29 | func newConfigMapGetAction(ns, name string) kubetesting.GetActionImpl { 30 | return kubetesting.NewGetAction(configMapsGroup, ns, name) 31 | } 32 | 33 | func newConfigMapCreateAction(ns string, configMap *corev1.ConfigMap) kubetesting.CreateActionImpl { 34 | return kubetesting.NewCreateAction(configMapsGroup, ns, configMap) 35 | } 36 | 37 | func TestConfigMapServiceGetCreateOrUpdate(t *testing.T) { 38 | testConfigMap := &corev1.ConfigMap{ 39 | ObjectMeta: metav1.ObjectMeta{ 40 | Name: "testconfigmap1", 41 | ResourceVersion: "10", 42 | }, 43 | } 44 | 45 | testns := "testns" 46 | 47 | 
tests := []struct { 48 | name string 49 | configMap *corev1.ConfigMap 50 | getConfigMapResult *corev1.ConfigMap 51 | errorOnGet error 52 | errorOnCreation error 53 | expActions []kubetesting.Action 54 | expErr bool 55 | }{ 56 | { 57 | name: "A new configmap should create a new configmap.", 58 | configMap: testConfigMap, 59 | getConfigMapResult: nil, 60 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 61 | errorOnCreation: nil, 62 | expActions: []kubetesting.Action{ 63 | newConfigMapGetAction(testns, testConfigMap.ObjectMeta.Name), 64 | newConfigMapCreateAction(testns, testConfigMap), 65 | }, 66 | expErr: false, 67 | }, 68 | { 69 | name: "A new configmap should error when create a new configmap fails.", 70 | configMap: testConfigMap, 71 | getConfigMapResult: nil, 72 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 73 | errorOnCreation: errors.New("wanted error"), 74 | expActions: []kubetesting.Action{ 75 | newConfigMapGetAction(testns, testConfigMap.ObjectMeta.Name), 76 | newConfigMapCreateAction(testns, testConfigMap), 77 | }, 78 | expErr: true, 79 | }, 80 | { 81 | name: "An existent configmap should update the configmap.", 82 | configMap: testConfigMap, 83 | getConfigMapResult: testConfigMap, 84 | errorOnGet: nil, 85 | errorOnCreation: nil, 86 | expActions: []kubetesting.Action{ 87 | newConfigMapGetAction(testns, testConfigMap.ObjectMeta.Name), 88 | newConfigMapUpdateAction(testns, testConfigMap), 89 | }, 90 | expErr: false, 91 | }, 92 | } 93 | 94 | for _, test := range tests { 95 | t.Run(test.name, func(t *testing.T) { 96 | assert := assert.New(t) 97 | 98 | // Mock. 99 | mcli := &kubernetes.Clientset{} 100 | mcli.AddReactor("get", "configmaps", func(action kubetesting.Action) (bool, runtime.Object, error) { 101 | return true, test.getConfigMapResult, test.errorOnGet 102 | }) 103 | mcli.AddReactor("create", "configmaps", func(action kubetesting.Action) (bool, runtime.Object, error) { 104 | return true, nil, test.errorOnCreation 105 | }) 106 | 107 | service := k8s.NewConfigMapService(mcli, log.Dummy, metrics.Dummy) 108 | err := service.CreateOrUpdateConfigMap(testns, test.configMap) 109 | 110 | if test.expErr { 111 | assert.Error(err) 112 | } else { 113 | assert.NoError(err) 114 | // Check calls to kubernetes. 
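// mcli.Actions() returns, in order, every action the fake clientset received, so the
// assertion below verifies both the call sequence (get, then create or update) and the
// exact namespace and object passed on each call.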
115 | assert.Equal(test.expActions, mcli.Actions()) 116 | } 117 | }) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /service/k8s/deployment_test.go: -------------------------------------------------------------------------------- 1 | package k8s_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | appsv1 "k8s.io/api/apps/v1" 9 | kubeerrors "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | kubernetes "k8s.io/client-go/kubernetes/fake" 14 | kubetesting "k8s.io/client-go/testing" 15 | 16 | "github.com/spotahome/redis-operator/log" 17 | "github.com/spotahome/redis-operator/metrics" 18 | "github.com/spotahome/redis-operator/service/k8s" 19 | ) 20 | 21 | var ( 22 | deploymentsGroup = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"} 23 | ) 24 | 25 | func newDeploymentUpdateAction(ns string, deployment *appsv1.Deployment) kubetesting.UpdateActionImpl { 26 | return kubetesting.NewUpdateAction(deploymentsGroup, ns, deployment) 27 | } 28 | 29 | func newDeploymentGetAction(ns, name string) kubetesting.GetActionImpl { 30 | return kubetesting.NewGetAction(deploymentsGroup, ns, name) 31 | } 32 | 33 | func newDeploymentCreateAction(ns string, deployment *appsv1.Deployment) kubetesting.CreateActionImpl { 34 | return kubetesting.NewCreateAction(deploymentsGroup, ns, deployment) 35 | } 36 | 37 | func TestDeploymentServiceGetCreateOrUpdate(t *testing.T) { 38 | testDeployment := &appsv1.Deployment{ 39 | ObjectMeta: metav1.ObjectMeta{ 40 | Name: "testdeployment1", 41 | ResourceVersion: "10", 42 | }, 43 | } 44 | 45 | testns := "testns" 46 | 47 | tests := []struct { 48 | name string 49 | deployment *appsv1.Deployment 50 | getDeploymentResult *appsv1.Deployment 51 | errorOnGet error 52 | errorOnCreation error 53 | expActions []kubetesting.Action 54 | expErr bool 55 | }{ 56 | { 57 | name: "A new deployment should create a new deployment.", 58 | deployment: testDeployment, 59 | getDeploymentResult: nil, 60 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 61 | errorOnCreation: nil, 62 | expActions: []kubetesting.Action{ 63 | newDeploymentGetAction(testns, testDeployment.ObjectMeta.Name), 64 | newDeploymentCreateAction(testns, testDeployment), 65 | }, 66 | expErr: false, 67 | }, 68 | { 69 | name: "A new deployment should error when create a new deployment fails.", 70 | deployment: testDeployment, 71 | getDeploymentResult: nil, 72 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 73 | errorOnCreation: errors.New("wanted error"), 74 | expActions: []kubetesting.Action{ 75 | newDeploymentGetAction(testns, testDeployment.ObjectMeta.Name), 76 | newDeploymentCreateAction(testns, testDeployment), 77 | }, 78 | expErr: true, 79 | }, 80 | { 81 | name: "An existent deployment should update the deployment.", 82 | deployment: testDeployment, 83 | getDeploymentResult: testDeployment, 84 | errorOnGet: nil, 85 | errorOnCreation: nil, 86 | expActions: []kubetesting.Action{ 87 | newDeploymentGetAction(testns, testDeployment.ObjectMeta.Name), 88 | newDeploymentUpdateAction(testns, testDeployment), 89 | }, 90 | expErr: false, 91 | }, 92 | } 93 | 94 | for _, test := range tests { 95 | t.Run(test.name, func(t *testing.T) { 96 | assert := assert.New(t) 97 | 98 | // Mock. 
99 | mcli := &kubernetes.Clientset{} 100 | mcli.AddReactor("get", "deployments", func(action kubetesting.Action) (bool, runtime.Object, error) { 101 | return true, test.getDeploymentResult, test.errorOnGet 102 | }) 103 | mcli.AddReactor("create", "deployments", func(action kubetesting.Action) (bool, runtime.Object, error) { 104 | return true, nil, test.errorOnCreation 105 | }) 106 | 107 | service := k8s.NewDeploymentService(mcli, log.Dummy, metrics.Dummy) 108 | err := service.CreateOrUpdateDeployment(testns, test.deployment) 109 | 110 | if test.expErr { 111 | assert.Error(err) 112 | } else { 113 | assert.NoError(err) 114 | // Check calls to kubernetes. 115 | assert.Equal(test.expActions, mcli.Actions()) 116 | } 117 | }) 118 | } 119 | } 120 | -------------------------------------------------------------------------------- /operator/redisfailover/handler.go: -------------------------------------------------------------------------------- 1 | package redisfailover 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "regexp" 7 | 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | 11 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 12 | "github.com/spotahome/redis-operator/log" 13 | "github.com/spotahome/redis-operator/metrics" 14 | rfservice "github.com/spotahome/redis-operator/operator/redisfailover/service" 15 | "github.com/spotahome/redis-operator/operator/redisfailover/util" 16 | "github.com/spotahome/redis-operator/service/k8s" 17 | ) 18 | 19 | const ( 20 | rfLabelManagedByKey = "app.kubernetes.io/managed-by" 21 | rfLabelNameKey = "redisfailovers.databases.spotahome.com/name" 22 | ) 23 | 24 | var ( 25 | defaultLabels = map[string]string{ 26 | rfLabelManagedByKey: operatorName, 27 | } 28 | ) 29 | 30 | // RedisFailoverHandler is the Redis Failover handler. This handler will create the required 31 | // resources that a RF needs. 32 | type RedisFailoverHandler struct { 33 | config Config 34 | k8sservice k8s.Service 35 | rfService rfservice.RedisFailoverClient 36 | rfChecker rfservice.RedisFailoverCheck 37 | rfHealer rfservice.RedisFailoverHeal 38 | mClient metrics.Recorder 39 | logger log.Logger 40 | } 41 | 42 | // NewRedisFailoverHandler returns a new RF handler 43 | func NewRedisFailoverHandler(config Config, rfService rfservice.RedisFailoverClient, rfChecker rfservice.RedisFailoverCheck, rfHealer rfservice.RedisFailoverHeal, k8sservice k8s.Service, mClient metrics.Recorder, logger log.Logger) *RedisFailoverHandler { 44 | return &RedisFailoverHandler{ 45 | config: config, 46 | rfService: rfService, 47 | rfChecker: rfChecker, 48 | rfHealer: rfHealer, 49 | mClient: mClient, 50 | k8sservice: k8sservice, 51 | logger: logger, 52 | } 53 | } 54 | 55 | // Handle will ensure the redis failover is in the expected state. 56 | func (r *RedisFailoverHandler) Handle(_ context.Context, obj runtime.Object) error { 57 | rf, ok := obj.(*redisfailoverv1.RedisFailover) 58 | if !ok { 59 | return fmt.Errorf("can't handle the received object: not a redisfailover") 60 | } 61 | 62 | if err := rf.Validate(); err != nil { 63 | r.mClient.SetClusterError(rf.Namespace, rf.Name) 64 | return err 65 | } 66 | 67 | // Create owner refs so the objects manager by this handler have ownership to the 68 | // received RF. 69 | oRefs := r.createOwnerReferences(rf) 70 | 71 | // Create the labels every object derived from this need to have. 
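// getLabels (below) merges the operator-wide defaults, the per-instance
// redisfailovers.databases.spotahome.com/name label and any custom labels on the
// RedisFailover that match one of the rf.Spec.LabelWhitelist regexes (or all custom
// labels when no whitelist is set).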
72 | labels := r.getLabels(rf) 73 | 74 | if err := r.Ensure(rf, labels, oRefs, r.mClient); err != nil { 75 | r.mClient.SetClusterError(rf.Namespace, rf.Name) 76 | return err 77 | } 78 | 79 | if err := r.CheckAndHeal(rf); err != nil { 80 | r.mClient.SetClusterError(rf.Namespace, rf.Name) 81 | return err 82 | } 83 | 84 | r.mClient.SetClusterOK(rf.Namespace, rf.Name) 85 | return nil 86 | } 87 | 88 | // getLabels merges the labels (dynamic and operator static ones). 89 | func (r *RedisFailoverHandler) getLabels(rf *redisfailoverv1.RedisFailover) map[string]string { 90 | dynLabels := map[string]string{ 91 | rfLabelNameKey: rf.Name, 92 | } 93 | 94 | // Filter the labels based on the whitelist 95 | filteredCustomLabels := make(map[string]string) 96 | if rf.Spec.LabelWhitelist != nil && len(rf.Spec.LabelWhitelist) != 0 { 97 | for _, regex := range rf.Spec.LabelWhitelist { 98 | compiledRegexp, err := regexp.Compile(regex) 99 | if err != nil { 100 | r.logger.Errorf("Unable to compile label whitelist regex '%s', ignoring it.", regex) 101 | continue 102 | } 103 | for labelKey, labelValue := range rf.Labels { 104 | if match := compiledRegexp.MatchString(labelKey); match { 105 | filteredCustomLabels[labelKey] = labelValue 106 | } 107 | } 108 | } 109 | } else { 110 | // If no whitelist is specified then don't filter the labels. 111 | filteredCustomLabels = rf.Labels 112 | } 113 | return util.MergeLabels(defaultLabels, dynLabels, filteredCustomLabels) 114 | } 115 | 116 | func (w *RedisFailoverHandler) createOwnerReferences(rf *redisfailoverv1.RedisFailover) []metav1.OwnerReference { 117 | rfvk := redisfailoverv1.VersionKind(redisfailoverv1.RFKind) 118 | return []metav1.OwnerReference{ 119 | *metav1.NewControllerRef(rf, rfvk), 120 | } 121 | } 122 | -------------------------------------------------------------------------------- /service/k8s/configmap.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/api/errors" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/client-go/kubernetes" 10 | 11 | "github.com/spotahome/redis-operator/log" 12 | "github.com/spotahome/redis-operator/metrics" 13 | ) 14 | 15 | // ConfigMap the ServiceAccount service that knows how to interact with k8s to manage them 16 | type ConfigMap interface { 17 | GetConfigMap(namespace string, name string) (*corev1.ConfigMap, error) 18 | CreateConfigMap(namespace string, configMap *corev1.ConfigMap) error 19 | UpdateConfigMap(namespace string, configMap *corev1.ConfigMap) error 20 | CreateOrUpdateConfigMap(namespace string, np *corev1.ConfigMap) error 21 | DeleteConfigMap(namespace string, name string) error 22 | ListConfigMaps(namespace string) (*corev1.ConfigMapList, error) 23 | } 24 | 25 | // ConfigMapService is the configMap service implementation using API calls to kubernetes. 26 | type ConfigMapService struct { 27 | kubeClient kubernetes.Interface 28 | logger log.Logger 29 | metricsRecorder metrics.Recorder 30 | } 31 | 32 | // NewConfigMapService returns a new ConfigMap KubeService. 
33 | func NewConfigMapService(kubeClient kubernetes.Interface, logger log.Logger, metricsRecorder metrics.Recorder) *ConfigMapService { 34 | logger = logger.With("service", "k8s.configMap") 35 | return &ConfigMapService{ 36 | kubeClient: kubeClient, 37 | logger: logger, 38 | metricsRecorder: metricsRecorder, 39 | } 40 | } 41 | 42 | func (p *ConfigMapService) GetConfigMap(namespace string, name string) (*corev1.ConfigMap, error) { 43 | configMap, err := p.kubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 44 | recordMetrics(namespace, "ConfigMap", name, "GET", err, p.metricsRecorder) 45 | if err != nil { 46 | return nil, err 47 | } 48 | return configMap, err 49 | } 50 | 51 | func (p *ConfigMapService) CreateConfigMap(namespace string, configMap *corev1.ConfigMap) error { 52 | _, err := p.kubeClient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), configMap, metav1.CreateOptions{}) 53 | recordMetrics(namespace, "ConfigMap", configMap.GetName(), "CREATE", err, p.metricsRecorder) 54 | if err != nil { 55 | return err 56 | } 57 | p.logger.WithField("namespace", namespace).WithField("configMap", configMap.Name).Debugf("configMap created") 58 | return nil 59 | } 60 | func (p *ConfigMapService) UpdateConfigMap(namespace string, configMap *corev1.ConfigMap) error { 61 | _, err := p.kubeClient.CoreV1().ConfigMaps(namespace).Update(context.TODO(), configMap, metav1.UpdateOptions{}) 62 | recordMetrics(namespace, "ConfigMap", configMap.GetName(), "UPDATE", err, p.metricsRecorder) 63 | if err != nil { 64 | return err 65 | } 66 | p.logger.WithField("namespace", namespace).WithField("configMap", configMap.Name).Debugf("configMap updated") 67 | return nil 68 | } 69 | func (p *ConfigMapService) CreateOrUpdateConfigMap(namespace string, configMap *corev1.ConfigMap) error { 70 | storedConfigMap, err := p.GetConfigMap(namespace, configMap.Name) 71 | if err != nil { 72 | // If no resource we need to create. 73 | if errors.IsNotFound(err) { 74 | return p.CreateConfigMap(namespace, configMap) 75 | } 76 | return err 77 | } 78 | 79 | // Already exists, need to Update. 80 | // Set the correct resource version to ensure we are on the latest version. This way the only valid 81 | // namespace is our spec(https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency), 82 | // we will replace the current namespace state. 
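// Copying the stored ResourceVersion turns the Update below into an optimistic-
// concurrency write: if someone else modified the ConfigMap in the meantime, the API
// server rejects the update with a Conflict instead of silently overwriting it.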
83 | configMap.ResourceVersion = storedConfigMap.ResourceVersion 84 | return p.UpdateConfigMap(namespace, configMap) 85 | } 86 | 87 | func (p *ConfigMapService) DeleteConfigMap(namespace string, name string) error { 88 | err := p.kubeClient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) 89 | recordMetrics(namespace, "ConfigMap", name, "DELETE", err, p.metricsRecorder) 90 | return err 91 | } 92 | 93 | func (p *ConfigMapService) ListConfigMaps(namespace string) (*corev1.ConfigMapList, error) { 94 | objects, err := p.kubeClient.CoreV1().ConfigMaps(namespace).List(context.TODO(), metav1.ListOptions{}) 95 | recordMetrics(namespace, "ConfigMap", metrics.NOT_APPLICABLE, "LIST", err, p.metricsRecorder) 96 | return objects, err 97 | } 98 | -------------------------------------------------------------------------------- /metrics/metrics_test.go: -------------------------------------------------------------------------------- 1 | package metrics_test 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promhttp" 11 | 12 | "github.com/stretchr/testify/assert" 13 | 14 | "github.com/spotahome/redis-operator/metrics" 15 | ) 16 | 17 | func TestPrometheusMetrics(t *testing.T) { 18 | 19 | tests := []struct { 20 | name string 21 | addMetrics func(rec metrics.Recorder) 22 | expMetrics []string 23 | expCode int 24 | }{ 25 | { 26 | name: "Setting OK should give an OK", 27 | addMetrics: func(rec metrics.Recorder) { 28 | rec.SetClusterOK("testns", "test") 29 | }, 30 | expMetrics: []string{ 31 | `my_metrics_controller_cluster_ok{name="test",namespace="testns"} 1`, 32 | }, 33 | expCode: http.StatusOK, 34 | }, 35 | { 36 | name: "Setting Error should give an Error", 37 | addMetrics: func(rec metrics.Recorder) { 38 | rec.SetClusterError("testns", "test") 39 | }, 40 | expMetrics: []string{ 41 | `my_metrics_controller_cluster_ok{name="test",namespace="testns"} 0`, 42 | }, 43 | expCode: http.StatusOK, 44 | }, 45 | { 46 | name: "Setting Error after ok should give an Error", 47 | addMetrics: func(rec metrics.Recorder) { 48 | rec.SetClusterOK("testns", "test") 49 | rec.SetClusterError("testns", "test") 50 | }, 51 | expMetrics: []string{ 52 | `my_metrics_controller_cluster_ok{name="test",namespace="testns"} 0`, 53 | }, 54 | expCode: http.StatusOK, 55 | }, 56 | { 57 | name: "Setting OK after Error should give an OK", 58 | addMetrics: func(rec metrics.Recorder) { 59 | rec.SetClusterError("testns", "test") 60 | rec.SetClusterOK("testns", "test") 61 | }, 62 | expMetrics: []string{ 63 | `my_metrics_controller_cluster_ok{name="test",namespace="testns"} 1`, 64 | }, 65 | expCode: http.StatusOK, 66 | }, 67 | { 68 | name: "Multiple clusters should appear", 69 | addMetrics: func(rec metrics.Recorder) { 70 | rec.SetClusterOK("testns", "test") 71 | rec.SetClusterOK("testns", "test2") 72 | }, 73 | expMetrics: []string{ 74 | `my_metrics_controller_cluster_ok{name="test",namespace="testns"} 1`, 75 | `my_metrics_controller_cluster_ok{name="test2",namespace="testns"} 1`, 76 | }, 77 | expCode: http.StatusOK, 78 | }, 79 | { 80 | name: "Same name on different namespaces should appear", 81 | addMetrics: func(rec metrics.Recorder) { 82 | rec.SetClusterOK("testns1", "test") 83 | rec.SetClusterOK("testns2", "test") 84 | }, 85 | expMetrics: []string{ 86 | `my_metrics_controller_cluster_ok{name="test",namespace="testns1"} 1`, 87 | 
`my_metrics_controller_cluster_ok{name="test",namespace="testns2"} 1`, 88 | }, 89 | expCode: http.StatusOK, 90 | }, 91 | { 92 | name: "Deleting a cluster should remove it", 93 | addMetrics: func(rec metrics.Recorder) { 94 | rec.SetClusterOK("testns1", "test") 95 | rec.DeleteCluster("testns1", "test") 96 | }, 97 | expMetrics: []string{}, 98 | expCode: http.StatusOK, 99 | }, 100 | { 101 | name: "Deleting a cluster should remove only the desired one", 102 | addMetrics: func(rec metrics.Recorder) { 103 | rec.SetClusterOK("testns1", "test") 104 | rec.SetClusterOK("testns2", "test") 105 | rec.DeleteCluster("testns1", "test") 106 | }, 107 | expMetrics: []string{ 108 | `my_metrics_controller_cluster_ok{name="test",namespace="testns2"} 1`, 109 | }, 110 | expCode: http.StatusOK, 111 | }, 112 | } 113 | 114 | for _, test := range tests { 115 | t.Run(test.name, func(t *testing.T) { 116 | assert := assert.New(t) 117 | 118 | // Create the muxer for testing. 119 | reg := prometheus.NewRegistry() 120 | rec := metrics.NewRecorder("my_metrics", reg) 121 | 122 | // Add metrics to prometheus. 123 | test.addMetrics(rec) 124 | 125 | // Make the request to the metrics. 126 | h := promhttp.HandlerFor(reg, promhttp.HandlerOpts{}) 127 | w := httptest.NewRecorder() 128 | h.ServeHTTP(w, httptest.NewRequest(http.MethodGet, "/metrics", nil)) 129 | 130 | resp := w.Result() 131 | if assert.Equal(test.expCode, resp.StatusCode) { 132 | body, _ := io.ReadAll(resp.Body) 133 | // Check all the metrics are present. 134 | for _, expMetric := range test.expMetrics { 135 | assert.Contains(string(body), expMetric) 136 | } 137 | } 138 | }) 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /cmd/redisoperator/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | _ "net/http/pprof" 8 | "os" 9 | "os/signal" 10 | "strings" 11 | "syscall" 12 | "time" 13 | 14 | "github.com/prometheus/client_golang/prometheus" 15 | "github.com/prometheus/client_golang/prometheus/promhttp" 16 | _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" 17 | 18 | "github.com/spotahome/redis-operator/cmd/utils" 19 | "github.com/spotahome/redis-operator/log" 20 | "github.com/spotahome/redis-operator/metrics" 21 | "github.com/spotahome/redis-operator/operator/redisfailover" 22 | "github.com/spotahome/redis-operator/service/k8s" 23 | "github.com/spotahome/redis-operator/service/redis" 24 | ) 25 | 26 | const ( 27 | gracePeriod = 5 * time.Second 28 | metricsNamespace = "redis_operator" 29 | ) 30 | 31 | // Main is the main runner. 32 | type Main struct { 33 | flags *utils.CMDFlags 34 | logger log.Logger 35 | stopC chan struct{} 36 | } 37 | 38 | // New returns a Main object. 39 | func New(logger log.Logger) Main { 40 | // Init flags. 41 | flgs := &utils.CMDFlags{} 42 | flgs.Init() 43 | 44 | return Main{ 45 | logger: logger, 46 | flags: flgs, 47 | } 48 | } 49 | 50 | // Run execs the program. 51 | func (m *Main) Run() error { 52 | // Create signal channels. 53 | m.stopC = make(chan struct{}) 54 | errC := make(chan error) 55 | 56 | // Set correct logging. 57 | err := m.logger.Set(log.Level(strings.ToLower(m.flags.LogLevel))) 58 | if err != nil { 59 | return err 60 | } 61 | 62 | // Create the metrics client. 63 | metricsRecorder := metrics.NewRecorder(metricsNamespace, prometheus.DefaultRegisterer) 64 | 65 | // Serve metrics. 
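// promhttp.Handler() is registered on http.DefaultServeMux and ListenAndServe is
// called with a nil handler, so the same mux (including the /debug/pprof endpoints
// registered by the blank net/http/pprof import) is exposed on ListenAddr.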
66 | go func() { 67 | log.Infof("Listening on %s for metrics exposure on URL %s", m.flags.ListenAddr, m.flags.MetricsPath) 68 | http.Handle(m.flags.MetricsPath, promhttp.Handler()) 69 | err := http.ListenAndServe(m.flags.ListenAddr, nil) 70 | if err != nil { 71 | log.Fatal(err) 72 | } 73 | }() 74 | 75 | // Kubernetes clients. 76 | k8sClient, customClient, aeClientset, err := utils.CreateKubernetesClients(m.flags) 77 | if err != nil { 78 | return err 79 | } 80 | 81 | // Create kubernetes service. 82 | k8sservice := k8s.New(k8sClient, customClient, aeClientset, m.logger, metricsRecorder) 83 | 84 | // Create the redis clients 85 | redisClient := redis.New(metricsRecorder) 86 | 87 | // Get lease lock resource namespace 88 | lockNamespace := getNamespace() 89 | 90 | // Create operator and run. 91 | redisfailoverOperator, err := redisfailover.New(m.flags.ToRedisOperatorConfig(), k8sservice, k8sClient, lockNamespace, redisClient, metricsRecorder, m.logger) 92 | if err != nil { 93 | return err 94 | } 95 | 96 | go func() { 97 | errC <- redisfailoverOperator.Run(context.Background()) 98 | }() 99 | 100 | // Await signals. 101 | sigC := m.createSignalCapturer() 102 | var finalErr error 103 | select { 104 | case <-sigC: 105 | m.logger.Infof("Signal captured, exiting...") 106 | case err := <-errC: 107 | m.logger.Errorf("Error received: %s, exiting...", err) 108 | finalErr = err 109 | } 110 | 111 | m.stop(m.stopC) 112 | return finalErr 113 | } 114 | 115 | func (m *Main) createSignalCapturer() <-chan os.Signal { 116 | sigC := make(chan os.Signal, 1) 117 | signal.Notify(sigC, syscall.SIGTERM, syscall.SIGINT) 118 | return sigC 119 | } 120 | 121 | func (m *Main) stop(stopC chan struct{}) { 122 | m.logger.Infof("Stopping everything, waiting %s...", gracePeriod) 123 | 124 | // stop everything and let them time to stop 125 | close(stopC) 126 | time.Sleep(gracePeriod) 127 | } 128 | 129 | func getNamespace() string { 130 | // This way assumes you've set the POD_NAMESPACE environment 131 | // variable using the downward API. This check has to be done first 132 | // for backwards compatibility with the way InClusterConfig was 133 | // originally set up 134 | if ns, ok := os.LookupEnv("POD_NAMESPACE"); ok { 135 | return ns 136 | } 137 | 138 | // Fall back to the namespace associated with the service account 139 | // token, if available 140 | if data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil { 141 | if ns := strings.TrimSpace(string(data)); len(ns) > 0 { 142 | return ns 143 | } 144 | } 145 | 146 | return "default" 147 | } 148 | 149 | // Run app. 
150 | func main() { 151 | logger := log.Base() 152 | m := New(logger) 153 | 154 | if err := m.Run(); err != nil { 155 | fmt.Fprintf(os.Stderr, "error executing: %s", err) 156 | os.Exit(1) 157 | } 158 | os.Exit(0) 159 | } 160 | -------------------------------------------------------------------------------- /api/redisfailover/v1/validate_test.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | func TestValidate(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | rfName string 14 | rfBootstrapNode *BootstrapSettings 15 | rfRedisCustomConfig []string 16 | rfSentinelCustomConfig []string 17 | expectedError string 18 | expectedBootstrapNode *BootstrapSettings 19 | }{ 20 | { 21 | name: "populates default values", 22 | rfName: "test", 23 | }, 24 | { 25 | name: "errors on too long of name", 26 | rfName: "some-super-absurdely-unnecessarily-long-name-that-will-most-definitely-fail", 27 | expectedError: "name length can't be higher than 48", 28 | }, 29 | { 30 | name: "SentinelCustomConfig provided", 31 | rfName: "test", 32 | rfSentinelCustomConfig: []string{"failover-timeout 500"}, 33 | }, 34 | { 35 | name: "BootstrapNode provided without a host", 36 | rfName: "test", 37 | rfBootstrapNode: &BootstrapSettings{}, 38 | expectedError: "BootstrapNode must include a host when provided", 39 | }, 40 | { 41 | name: "SentinelCustomConfig provided", 42 | rfName: "test", 43 | }, 44 | { 45 | name: "Populates default bootstrap port when valid", 46 | rfName: "test", 47 | rfBootstrapNode: &BootstrapSettings{Host: "127.0.0.1"}, 48 | expectedBootstrapNode: &BootstrapSettings{Host: "127.0.0.1", Port: "6379"}, 49 | }, 50 | { 51 | name: "Allows for specifying boostrap port", 52 | rfName: "test", 53 | rfBootstrapNode: &BootstrapSettings{Host: "127.0.0.1", Port: "6380"}, 54 | expectedBootstrapNode: &BootstrapSettings{Host: "127.0.0.1", Port: "6380"}, 55 | }, 56 | { 57 | name: "Appends applied custom config to default initial values", 58 | rfName: "test", 59 | rfRedisCustomConfig: []string{"tcp-keepalive 60"}, 60 | }, 61 | { 62 | name: "Appends applied custom config to default initial values when bootstrapping", 63 | rfName: "test", 64 | rfRedisCustomConfig: []string{"tcp-keepalive 60"}, 65 | rfBootstrapNode: &BootstrapSettings{Host: "127.0.0.1"}, 66 | expectedBootstrapNode: &BootstrapSettings{Host: "127.0.0.1", Port: "6379"}, 67 | }, 68 | } 69 | 70 | for _, test := range tests { 71 | t.Run(test.name, func(t *testing.T) { 72 | assert := assert.New(t) 73 | rf := generateRedisFailover(test.rfName, test.rfBootstrapNode) 74 | rf.Spec.Redis.CustomConfig = test.rfRedisCustomConfig 75 | rf.Spec.Sentinel.CustomConfig = test.rfSentinelCustomConfig 76 | 77 | err := rf.Validate() 78 | 79 | if test.expectedError == "" { 80 | assert.NoError(err) 81 | 82 | expectedRedisCustomConfig := []string{ 83 | "replica-priority 100", 84 | } 85 | 86 | if test.rfBootstrapNode != nil { 87 | expectedRedisCustomConfig = []string{ 88 | "replica-priority 0", 89 | } 90 | } 91 | 92 | expectedRedisCustomConfig = append(expectedRedisCustomConfig, test.rfRedisCustomConfig...) 
93 | expectedSentinelCustomConfig := defaultSentinelCustomConfig 94 | if len(test.rfSentinelCustomConfig) > 0 { 95 | expectedSentinelCustomConfig = test.rfSentinelCustomConfig 96 | } 97 | 98 | expectedRF := &RedisFailover{ 99 | ObjectMeta: metav1.ObjectMeta{ 100 | Name: test.rfName, 101 | Namespace: "namespace", 102 | }, 103 | Spec: RedisFailoverSpec{ 104 | Redis: RedisSettings{ 105 | Image: defaultImage, 106 | Replicas: defaultRedisNumber, 107 | Port: defaultRedisPort, 108 | Exporter: Exporter{ 109 | Image: defaultExporterImage, 110 | }, 111 | CustomConfig: expectedRedisCustomConfig, 112 | }, 113 | Sentinel: SentinelSettings{ 114 | Image: defaultImage, 115 | Replicas: defaultSentinelNumber, 116 | CustomConfig: expectedSentinelCustomConfig, 117 | Exporter: Exporter{ 118 | Image: defaultSentinelExporterImage, 119 | }, 120 | }, 121 | BootstrapNode: test.expectedBootstrapNode, 122 | }, 123 | } 124 | assert.Equal(expectedRF, rf) 125 | } else { 126 | if assert.Error(err) { 127 | assert.Contains(test.expectedError, err.Error()) 128 | } 129 | } 130 | }) 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /service/k8s/poddisruptionbudget.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | 6 | policyv1 "k8s.io/api/policy/v1" 7 | "k8s.io/apimachinery/pkg/api/errors" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/client-go/kubernetes" 10 | 11 | "github.com/spotahome/redis-operator/log" 12 | "github.com/spotahome/redis-operator/metrics" 13 | ) 14 | 15 | // PodDisruptionBudget the ServiceAccount service that knows how to interact with k8s to manage them 16 | type PodDisruptionBudget interface { 17 | GetPodDisruptionBudget(namespace string, name string) (*policyv1.PodDisruptionBudget, error) 18 | CreatePodDisruptionBudget(namespace string, podDisruptionBudget *policyv1.PodDisruptionBudget) error 19 | UpdatePodDisruptionBudget(namespace string, podDisruptionBudget *policyv1.PodDisruptionBudget) error 20 | CreateOrUpdatePodDisruptionBudget(namespace string, podDisruptionBudget *policyv1.PodDisruptionBudget) error 21 | DeletePodDisruptionBudget(namespace string, name string) error 22 | } 23 | 24 | // PodDisruptionBudgetService is the podDisruptionBudget service implementation using API calls to kubernetes. 25 | type PodDisruptionBudgetService struct { 26 | kubeClient kubernetes.Interface 27 | logger log.Logger 28 | metricsRecorder metrics.Recorder 29 | } 30 | 31 | // NewPodDisruptionBudgetService returns a new PodDisruptionBudget KubeService. 
32 | func NewPodDisruptionBudgetService(kubeClient kubernetes.Interface, logger log.Logger, metricsRecorder metrics.Recorder) *PodDisruptionBudgetService { 33 | logger = logger.With("service", "k8s.podDisruptionBudget") 34 | return &PodDisruptionBudgetService{ 35 | kubeClient: kubeClient, 36 | logger: logger, 37 | metricsRecorder: metricsRecorder, 38 | } 39 | } 40 | 41 | func (p *PodDisruptionBudgetService) GetPodDisruptionBudget(namespace string, name string) (*policyv1.PodDisruptionBudget, error) { 42 | podDisruptionBudget, err := p.kubeClient.PolicyV1().PodDisruptionBudgets(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 43 | recordMetrics(namespace, "PodDisruptionBudget", name, "GET", err, p.metricsRecorder) 44 | if err != nil { 45 | return nil, err 46 | } 47 | return podDisruptionBudget, nil 48 | } 49 | 50 | func (p *PodDisruptionBudgetService) CreatePodDisruptionBudget(namespace string, podDisruptionBudget *policyv1.PodDisruptionBudget) error { 51 | _, err := p.kubeClient.PolicyV1().PodDisruptionBudgets(namespace).Create(context.TODO(), podDisruptionBudget, metav1.CreateOptions{}) 52 | recordMetrics(namespace, "PodDisruptionBudget", podDisruptionBudget.GetName(), "CREATE", err, p.metricsRecorder) 53 | if err != nil { 54 | return err 55 | } 56 | p.logger.WithField("namespace", namespace).WithField("podDisruptionBudget", podDisruptionBudget.Name).Debugf("podDisruptionBudget created") 57 | return nil 58 | } 59 | 60 | func (p *PodDisruptionBudgetService) UpdatePodDisruptionBudget(namespace string, podDisruptionBudget *policyv1.PodDisruptionBudget) error { 61 | _, err := p.kubeClient.PolicyV1().PodDisruptionBudgets(namespace).Update(context.TODO(), podDisruptionBudget, metav1.UpdateOptions{}) 62 | recordMetrics(namespace, "PodDisruptionBudget", podDisruptionBudget.GetName(), "UPDATE", err, p.metricsRecorder) 63 | if err != nil { 64 | return err 65 | } 66 | p.logger.WithField("namespace", namespace).WithField("podDisruptionBudget", podDisruptionBudget.Name).Debugf("podDisruptionBudget updated") 67 | return nil 68 | } 69 | 70 | func (p *PodDisruptionBudgetService) CreateOrUpdatePodDisruptionBudget(namespace string, podDisruptionBudget *policyv1.PodDisruptionBudget) error { 71 | storedPodDisruptionBudget, err := p.GetPodDisruptionBudget(namespace, podDisruptionBudget.Name) 72 | if err != nil { 73 | // If no resource we need to create. 74 | if errors.IsNotFound(err) { 75 | return p.CreatePodDisruptionBudget(namespace, podDisruptionBudget) 76 | } 77 | return err 78 | } 79 | 80 | // Already exists, need to Update. 81 | // Set the correct resource version to ensure we are on the latest version. This way the only valid 82 | // namespace is our spec(https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency), 83 | // we will replace the current namespace state. 
84 | podDisruptionBudget.ResourceVersion = storedPodDisruptionBudget.ResourceVersion 85 | return p.UpdatePodDisruptionBudget(namespace, podDisruptionBudget) 86 | } 87 | 88 | func (p *PodDisruptionBudgetService) DeletePodDisruptionBudget(namespace string, name string) error { 89 | err := p.kubeClient.PolicyV1().PodDisruptionBudgets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) 90 | recordMetrics(namespace, "PodDisruptionBudget", name, "DELETE", err, p.metricsRecorder) 91 | return err 92 | } 93 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/typed/redisfailover/v1/fake/fake_redisfailover.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | package fake 4 | 5 | import ( 6 | "context" 7 | 8 | v1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | labels "k8s.io/apimachinery/pkg/labels" 11 | types "k8s.io/apimachinery/pkg/types" 12 | watch "k8s.io/apimachinery/pkg/watch" 13 | testing "k8s.io/client-go/testing" 14 | ) 15 | 16 | // FakeRedisFailovers implements RedisFailoverInterface 17 | type FakeRedisFailovers struct { 18 | Fake *FakeDatabasesV1 19 | ns string 20 | } 21 | 22 | var redisfailoversResource = v1.SchemeGroupVersion.WithResource("redisfailovers") 23 | 24 | var redisfailoversKind = v1.SchemeGroupVersion.WithKind("RedisFailover") 25 | 26 | // Get takes name of the redisFailover, and returns the corresponding redisFailover object, and an error if there is any. 27 | func (c *FakeRedisFailovers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RedisFailover, err error) { 28 | obj, err := c.Fake. 29 | Invokes(testing.NewGetAction(redisfailoversResource, c.ns, name), &v1.RedisFailover{}) 30 | 31 | if obj == nil { 32 | return nil, err 33 | } 34 | return obj.(*v1.RedisFailover), err 35 | } 36 | 37 | // List takes label and field selectors, and returns the list of RedisFailovers that match those selectors. 38 | func (c *FakeRedisFailovers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RedisFailoverList, err error) { 39 | obj, err := c.Fake. 40 | Invokes(testing.NewListAction(redisfailoversResource, redisfailoversKind, c.ns, opts), &v1.RedisFailoverList{}) 41 | 42 | if obj == nil { 43 | return nil, err 44 | } 45 | 46 | label, _, _ := testing.ExtractFromListOptions(opts) 47 | if label == nil { 48 | label = labels.Everything() 49 | } 50 | list := &v1.RedisFailoverList{ListMeta: obj.(*v1.RedisFailoverList).ListMeta} 51 | for _, item := range obj.(*v1.RedisFailoverList).Items { 52 | if label.Matches(labels.Set(item.Labels)) { 53 | list.Items = append(list.Items, item) 54 | } 55 | } 56 | return list, err 57 | } 58 | 59 | // Watch returns a watch.Interface that watches the requested redisFailovers. 60 | func (c *FakeRedisFailovers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { 61 | return c.Fake. 62 | InvokesWatch(testing.NewWatchAction(redisfailoversResource, c.ns, opts)) 63 | 64 | } 65 | 66 | // Create takes the representation of a redisFailover and creates it. Returns the server's representation of the redisFailover, and an error, if there is any. 67 | func (c *FakeRedisFailovers) Create(ctx context.Context, redisFailover *v1.RedisFailover, opts metav1.CreateOptions) (result *v1.RedisFailover, err error) { 68 | obj, err := c.Fake. 
69 | Invokes(testing.NewCreateAction(redisfailoversResource, c.ns, redisFailover), &v1.RedisFailover{}) 70 | 71 | if obj == nil { 72 | return nil, err 73 | } 74 | return obj.(*v1.RedisFailover), err 75 | } 76 | 77 | // Update takes the representation of a redisFailover and updates it. Returns the server's representation of the redisFailover, and an error, if there is any. 78 | func (c *FakeRedisFailovers) Update(ctx context.Context, redisFailover *v1.RedisFailover, opts metav1.UpdateOptions) (result *v1.RedisFailover, err error) { 79 | obj, err := c.Fake. 80 | Invokes(testing.NewUpdateAction(redisfailoversResource, c.ns, redisFailover), &v1.RedisFailover{}) 81 | 82 | if obj == nil { 83 | return nil, err 84 | } 85 | return obj.(*v1.RedisFailover), err 86 | } 87 | 88 | // Delete takes name of the redisFailover and deletes it. Returns an error if one occurs. 89 | func (c *FakeRedisFailovers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { 90 | _, err := c.Fake. 91 | Invokes(testing.NewDeleteActionWithOptions(redisfailoversResource, c.ns, name, opts), &v1.RedisFailover{}) 92 | 93 | return err 94 | } 95 | 96 | // DeleteCollection deletes a collection of objects. 97 | func (c *FakeRedisFailovers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { 98 | action := testing.NewDeleteCollectionAction(redisfailoversResource, c.ns, listOpts) 99 | 100 | _, err := c.Fake.Invokes(action, &v1.RedisFailoverList{}) 101 | return err 102 | } 103 | 104 | // Patch applies the patch and returns the patched redisFailover. 105 | func (c *FakeRedisFailovers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RedisFailover, err error) { 106 | obj, err := c.Fake. 
107 | Invokes(testing.NewPatchSubresourceAction(redisfailoversResource, c.ns, name, pt, data, subresources...), &v1.RedisFailover{}) 108 | 109 | if obj == nil { 110 | return nil, err 111 | } 112 | return obj.(*v1.RedisFailover), err 113 | } 114 | -------------------------------------------------------------------------------- /service/k8s/rbac_test.go: -------------------------------------------------------------------------------- 1 | package k8s_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | rbacv1 "k8s.io/api/rbac/v1" 9 | kubeerrors "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | kubernetes "k8s.io/client-go/kubernetes/fake" 14 | kubetesting "k8s.io/client-go/testing" 15 | 16 | "github.com/spotahome/redis-operator/log" 17 | "github.com/spotahome/redis-operator/metrics" 18 | "github.com/spotahome/redis-operator/service/k8s" 19 | ) 20 | 21 | var ( 22 | rbGroup = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "rolebindings"} 23 | ) 24 | 25 | func newRBUpdateAction(ns string, rb *rbacv1.RoleBinding) kubetesting.UpdateActionImpl { 26 | return kubetesting.NewUpdateAction(rbGroup, ns, rb) 27 | } 28 | 29 | func newRBGetAction(ns, name string) kubetesting.GetActionImpl { 30 | return kubetesting.NewGetAction(rbGroup, ns, name) 31 | } 32 | 33 | func newRBCreateAction(ns string, rb *rbacv1.RoleBinding) kubetesting.CreateActionImpl { 34 | return kubetesting.NewCreateAction(rbGroup, ns, rb) 35 | } 36 | func newRBDeleteAction(ns string, name string) kubetesting.DeleteActionImpl { 37 | return kubetesting.NewDeleteAction(rbGroup, ns, name) 38 | } 39 | 40 | func TestRBACServiceGetCreateOrUpdateRoleBinding(t *testing.T) { 41 | testRB := &rbacv1.RoleBinding{ 42 | ObjectMeta: metav1.ObjectMeta{ 43 | Name: "test1", 44 | ResourceVersion: "15", 45 | }, 46 | RoleRef: rbacv1.RoleRef{ 47 | Name: "test1", 48 | }, 49 | } 50 | 51 | testns := "testns" 52 | 53 | tests := []struct { 54 | name string 55 | rb *rbacv1.RoleBinding 56 | getRBResult *rbacv1.RoleBinding 57 | errorOnGet error 58 | errorOnCreation error 59 | expActions []kubetesting.Action 60 | expErr bool 61 | }{ 62 | { 63 | name: "A new role binding should create a new role binding.", 64 | rb: testRB, 65 | getRBResult: nil, 66 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 67 | errorOnCreation: nil, 68 | expActions: []kubetesting.Action{ 69 | newRBGetAction(testns, testRB.ObjectMeta.Name), 70 | newRBCreateAction(testns, testRB), 71 | }, 72 | expErr: false, 73 | }, 74 | { 75 | name: "A new role binding should error when create a new role binding fails.", 76 | rb: testRB, 77 | getRBResult: nil, 78 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 79 | errorOnCreation: errors.New("wanted error"), 80 | expActions: []kubetesting.Action{ 81 | newRBGetAction(testns, testRB.ObjectMeta.Name), 82 | newRBCreateAction(testns, testRB), 83 | }, 84 | expErr: true, 85 | }, 86 | { 87 | name: "An existent role binding should update the role binding.", 88 | rb: testRB, 89 | getRBResult: testRB, 90 | errorOnGet: nil, 91 | errorOnCreation: nil, 92 | expActions: []kubetesting.Action{ 93 | newRBGetAction(testns, testRB.ObjectMeta.Name), 94 | newRBUpdateAction(testns, testRB), 95 | }, 96 | expErr: false, 97 | }, 98 | { 99 | name: "A change in role reference inside binding should recreate the role binding.", 100 | rb: testRB, 101
| getRBResult: &rbacv1.RoleBinding{ 102 | ObjectMeta: metav1.ObjectMeta{ 103 | Name: "test1", 104 | ResourceVersion: "15", 105 | }, 106 | RoleRef: rbacv1.RoleRef{ 107 | Name: "oldroleRef", 108 | }, 109 | }, 110 | errorOnGet: nil, 111 | errorOnCreation: nil, 112 | expActions: []kubetesting.Action{ 113 | newRBGetAction(testns, testRB.ObjectMeta.Name), 114 | newRBDeleteAction(testns, testRB.Name), 115 | newRBCreateAction(testns, testRB), 116 | }, 117 | expErr: false, 118 | }, 119 | } 120 | 121 | for _, test := range tests { 122 | t.Run(test.name, func(t *testing.T) { 123 | assert := assert.New(t) 124 | 125 | // Mock. 126 | mcli := &kubernetes.Clientset{} 127 | mcli.AddReactor("get", "rolebindings", func(action kubetesting.Action) (bool, runtime.Object, error) { 128 | return true, test.getRBResult, test.errorOnGet 129 | }) 130 | mcli.AddReactor("create", "rolebindings", func(action kubetesting.Action) (bool, runtime.Object, error) { 131 | return true, nil, test.errorOnCreation 132 | }) 133 | 134 | service := k8s.NewRBACService(mcli, log.Dummy, metrics.Dummy) 135 | err := service.CreateOrUpdateRoleBinding(testns, test.rb) 136 | 137 | if test.expErr { 138 | assert.Error(err) 139 | } else { 140 | assert.NoError(err) 141 | // Check calls to kubernetes. 142 | assert.Equal(test.expActions, mcli.Actions()) 143 | } 144 | }) 145 | } 146 | } 147 | -------------------------------------------------------------------------------- /service/k8s/poddisruptionbudget_test.go: -------------------------------------------------------------------------------- 1 | package k8s_test 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | policyv1 "k8s.io/api/policy/v1" 9 | kubeerrors "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | kubernetes "k8s.io/client-go/kubernetes/fake" 14 | kubetesting "k8s.io/client-go/testing" 15 | 16 | "github.com/spotahome/redis-operator/log" 17 | "github.com/spotahome/redis-operator/metrics" 18 | "github.com/spotahome/redis-operator/service/k8s" 19 | ) 20 | 21 | var podDisruptionBudgetsGroup = schema.GroupVersionResource{Group: "policy", Version: "v1", Resource: "poddisruptionbudgets"} 22 | 23 | func newPodDisruptionBudgetUpdateAction(ns string, podDisruptionBudget *policyv1.PodDisruptionBudget) kubetesting.UpdateActionImpl { 24 | return kubetesting.NewUpdateAction(podDisruptionBudgetsGroup, ns, podDisruptionBudget) 25 | } 26 | 27 | func newPodDisruptionBudgetGetAction(ns, name string) kubetesting.GetActionImpl { 28 | return kubetesting.NewGetAction(podDisruptionBudgetsGroup, ns, name) 29 | } 30 | 31 | func newPodDisruptionBudgetCreateAction(ns string, podDisruptionBudget *policyv1.PodDisruptionBudget) kubetesting.CreateActionImpl { 32 | return kubetesting.NewCreateAction(podDisruptionBudgetsGroup, ns, podDisruptionBudget) 33 | } 34 | 35 | func TestPodDisruptionBudgetServiceGetCreateOrUpdate(t *testing.T) { 36 | testPodDisruptionBudget := &policyv1.PodDisruptionBudget{ 37 | ObjectMeta: metav1.ObjectMeta{ 38 | Name: "testpodDisruptionBudget1", 39 | ResourceVersion: "10", 40 | }, 41 | } 42 | 43 | testns := "testns" 44 | 45 | tests := []struct { 46 | name string 47 | podDisruptionBudget *policyv1.PodDisruptionBudget 48 | getPodDisruptionBudgetResult *policyv1.PodDisruptionBudget 49 | errorOnGet error 50 | errorOnCreation error 51 | expActions []kubetesting.Action 52 | expErr bool 53 | }{ 54 | { 55 | name: "A new 
podDisruptionBudget should create a new podDisruptionBudget.", 56 | podDisruptionBudget: testPodDisruptionBudget, 57 | getPodDisruptionBudgetResult: nil, 58 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 59 | errorOnCreation: nil, 60 | expActions: []kubetesting.Action{ 61 | newPodDisruptionBudgetGetAction(testns, testPodDisruptionBudget.ObjectMeta.Name), 62 | newPodDisruptionBudgetCreateAction(testns, testPodDisruptionBudget), 63 | }, 64 | expErr: false, 65 | }, 66 | { 67 | name: "A new podDisruptionBudget should error when create a new podDisruptionBudget fails.", 68 | podDisruptionBudget: testPodDisruptionBudget, 69 | getPodDisruptionBudgetResult: nil, 70 | errorOnGet: kubeerrors.NewNotFound(schema.GroupResource{}, ""), 71 | errorOnCreation: errors.New("wanted error"), 72 | expActions: []kubetesting.Action{ 73 | newPodDisruptionBudgetGetAction(testns, testPodDisruptionBudget.ObjectMeta.Name), 74 | newPodDisruptionBudgetCreateAction(testns, testPodDisruptionBudget), 75 | }, 76 | expErr: true, 77 | }, 78 | { 79 | name: "An existent podDisruptionBudget should update the podDisruptionBudget.", 80 | podDisruptionBudget: testPodDisruptionBudget, 81 | getPodDisruptionBudgetResult: testPodDisruptionBudget, 82 | errorOnGet: nil, 83 | errorOnCreation: nil, 84 | expActions: []kubetesting.Action{ 85 | newPodDisruptionBudgetGetAction(testns, testPodDisruptionBudget.ObjectMeta.Name), 86 | newPodDisruptionBudgetUpdateAction(testns, testPodDisruptionBudget), 87 | }, 88 | expErr: false, 89 | }, 90 | } 91 | 92 | for _, test := range tests { 93 | t.Run(test.name, func(t *testing.T) { 94 | assert := assert.New(t) 95 | 96 | // Mock. 97 | mcli := &kubernetes.Clientset{} 98 | mcli.AddReactor("get", "poddisruptionbudgets", func(action kubetesting.Action) (bool, runtime.Object, error) { 99 | return true, test.getPodDisruptionBudgetResult, test.errorOnGet 100 | }) 101 | mcli.AddReactor("create", "poddisruptionbudgets", func(action kubetesting.Action) (bool, runtime.Object, error) { 102 | return true, nil, test.errorOnCreation 103 | }) 104 | 105 | service := k8s.NewPodDisruptionBudgetService(mcli, log.Dummy, metrics.Dummy) 106 | err := service.CreateOrUpdatePodDisruptionBudget(testns, test.podDisruptionBudget) 107 | 108 | if test.expErr { 109 | assert.Error(err) 110 | } else { 111 | assert.NoError(err) 112 | // Check calls to kubernetes. 
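// (Actions are only compared on this success path; the error cases above just assert that an error was returned.)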
113 | assert.Equal(test.expActions, mcli.Actions()) 114 | } 115 | }) 116 | } 117 | } 118 | -------------------------------------------------------------------------------- /service/k8s/service.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/api/errors" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/client-go/kubernetes" 10 | 11 | "github.com/spotahome/redis-operator/log" 12 | "github.com/spotahome/redis-operator/metrics" 13 | ) 14 | 15 | // Service is the service that knows how to interact with k8s to manage Services 16 | type Service interface { 17 | GetService(namespace string, name string) (*corev1.Service, error) 18 | CreateService(namespace string, service *corev1.Service) error 19 | CreateIfNotExistsService(namespace string, service *corev1.Service) error 20 | UpdateService(namespace string, service *corev1.Service) error 21 | CreateOrUpdateService(namespace string, service *corev1.Service) error 22 | DeleteService(namespace string, name string) error 23 | ListServices(namespace string) (*corev1.ServiceList, error) 24 | } 25 | 26 | // ServiceService is the service service implementation using API calls to kubernetes. 27 | type ServiceService struct { 28 | kubeClient kubernetes.Interface 29 | logger log.Logger 30 | metricsRecorder metrics.Recorder 31 | } 32 | 33 | // NewServiceService returns a new Service KubeService. 34 | func NewServiceService(kubeClient kubernetes.Interface, logger log.Logger, metricsRecorder metrics.Recorder) *ServiceService { 35 | logger = logger.With("service", "k8s.service") 36 | return &ServiceService{ 37 | kubeClient: kubeClient, 38 | logger: logger, 39 | metricsRecorder: metricsRecorder, 40 | } 41 | } 42 | 43 | func (s *ServiceService) GetService(namespace string, name string) (*corev1.Service, error) { 44 | service, err := s.kubeClient.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 45 | recordMetrics(namespace, "Service", name, "GET", err, s.metricsRecorder) 46 | if err != nil { 47 | return nil, err 48 | } 49 | return service, err 50 | } 51 | 52 | func (s *ServiceService) CreateService(namespace string, service *corev1.Service) error { 53 | _, err := s.kubeClient.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{}) 54 | recordMetrics(namespace, "Service", service.GetName(), "CREATE", err, s.metricsRecorder) 55 | if err != nil { 56 | return err 57 | } 58 | s.logger.WithField("namespace", namespace).WithField("serviceName", service.Name).Debugf("service created") 59 | return nil 60 | } 61 | 62 | func (s *ServiceService) CreateIfNotExistsService(namespace string, service *corev1.Service) error { 63 | if _, err := s.GetService(namespace, service.Name); err != nil { 64 | // If no resource we need to create.
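// Only a NotFound error triggers creation; any other Get error is returned to the caller.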
65 | if errors.IsNotFound(err) { 66 | return s.CreateService(namespace, service) 67 | } 68 | return err 69 | } 70 | return nil 71 | } 72 | 73 | func (s *ServiceService) UpdateService(namespace string, service *corev1.Service) error { 74 | _, err := s.kubeClient.CoreV1().Services(namespace).Update(context.TODO(), service, metav1.UpdateOptions{}) 75 | recordMetrics(namespace, "Service", service.GetName(), "UPDATE", err, s.metricsRecorder) 76 | if err != nil { 77 | return err 78 | } 79 | s.logger.WithField("namespace", namespace).WithField("serviceName", service.Name).Debugf("service updated") 80 | return nil 81 | } 82 | func (s *ServiceService) CreateOrUpdateService(namespace string, service *corev1.Service) error { 83 | storedService, err := s.GetService(namespace, service.Name) 84 | if err != nil { 85 | // If no resource we need to create. 86 | if errors.IsNotFound(err) { 87 | return s.CreateService(namespace, service) 88 | } 89 | log.Errorf("Error while updating service %v in %v namespace : %v", service.GetName(), namespace, err) 90 | return err 91 | } 92 | 93 | // Already exists, need to Update. 94 | // Set the correct resource version to ensure we are on the latest version. This way the only valid 95 | // namespace is our spec(https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency), 96 | // we will replace the current namespace state. 97 | service.ResourceVersion = storedService.ResourceVersion 98 | return s.UpdateService(namespace, service) 99 | } 100 | 101 | func (s *ServiceService) DeleteService(namespace string, name string) error { 102 | propagation := metav1.DeletePropagationForeground 103 | err := s.kubeClient.CoreV1().Services(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &propagation}) 104 | recordMetrics(namespace, "Service", name, "DELETE", err, s.metricsRecorder) 105 | return err 106 | } 107 | 108 | func (s *ServiceService) ListServices(namespace string) (*corev1.ServiceList, error) { 109 | serviceList, err := s.kubeClient.CoreV1().Services(namespace).List(context.TODO(), metav1.ListOptions{}) 110 | recordMetrics(namespace, "Service", metrics.NOT_APPLICABLE, "LIST", err, s.metricsRecorder) 111 | return serviceList, err 112 | } 113 | -------------------------------------------------------------------------------- /operator/redisfailover/ensurer_test.go: -------------------------------------------------------------------------------- 1 | package redisfailover_test 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | "github.com/stretchr/testify/mock" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | 10 | redisfailoverv1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 11 | "github.com/spotahome/redis-operator/log" 12 | "github.com/spotahome/redis-operator/metrics" 13 | mRFService "github.com/spotahome/redis-operator/mocks/operator/redisfailover/service" 14 | mK8SService "github.com/spotahome/redis-operator/mocks/service/k8s" 15 | rfOperator "github.com/spotahome/redis-operator/operator/redisfailover" 16 | ) 17 | 18 | const ( 19 | name = "test" 20 | namespace = "testns" 21 | ) 22 | 23 | func generateConfig() rfOperator.Config { 24 | return rfOperator.Config{ 25 | ListenAddress: "1234", 26 | MetricsPath: "/awesome", 27 | } 28 | } 29 | 30 | func generateRF(enableExporter bool, bootstrapping bool) *redisfailoverv1.RedisFailover { 31 | return &redisfailoverv1.RedisFailover{ 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: name, 34 | 
Namespace: namespace, 35 | }, 36 | Spec: redisfailoverv1.RedisFailoverSpec{ 37 | Redis: redisfailoverv1.RedisSettings{ 38 | Replicas: int32(3), 39 | Exporter: redisfailoverv1.Exporter{ 40 | Enabled: enableExporter, 41 | }, 42 | }, 43 | Sentinel: redisfailoverv1.SentinelSettings{ 44 | Replicas: int32(3), 45 | }, 46 | BootstrapNode: generateRFBootstrappingNode(bootstrapping), 47 | }, 48 | } 49 | } 50 | 51 | func generateRFBootstrappingNode(bootstrapping bool) *redisfailoverv1.BootstrapSettings { 52 | if bootstrapping { 53 | return &redisfailoverv1.BootstrapSettings{ 54 | Host: "127.0.0.1", 55 | Port: "6379", 56 | } 57 | } 58 | return nil 59 | } 60 | 61 | func TestEnsure(t *testing.T) { 62 | tests := []struct { 63 | name string 64 | exporter bool 65 | bootstrapping bool 66 | bootstrappingAllowSentinels bool 67 | }{ 68 | { 69 | name: "Call everything, use exporter", 70 | exporter: true, 71 | bootstrapping: false, 72 | bootstrappingAllowSentinels: false, 73 | }, 74 | { 75 | name: "Call everything, don't use exporter", 76 | exporter: false, 77 | bootstrapping: false, 78 | bootstrappingAllowSentinels: false, 79 | }, 80 | { 81 | name: "Only ensure Redis when bootstrapping", 82 | exporter: false, 83 | bootstrapping: true, 84 | bootstrappingAllowSentinels: false, 85 | }, 86 | { 87 | name: "Call everything when bootstrapping allows sentinels", 88 | exporter: false, 89 | bootstrapping: true, 90 | bootstrappingAllowSentinels: true, 91 | }, 92 | } 93 | 94 | for _, test := range tests { 95 | t.Run(test.name, func(t *testing.T) { 96 | assert := assert.New(t) 97 | 98 | rf := generateRF(test.exporter, test.bootstrapping) 99 | if test.bootstrapping { 100 | rf.Spec.BootstrapNode.AllowSentinels = test.bootstrappingAllowSentinels 101 | } 102 | 103 | config := generateConfig() 104 | mk := &mK8SService.Services{} 105 | mrfc := &mRFService.RedisFailoverCheck{} 106 | mrfh := &mRFService.RedisFailoverHeal{} 107 | mrfs := &mRFService.RedisFailoverClient{} 108 | if test.exporter { 109 | mrfs.On("EnsureRedisService", rf, mock.Anything, mock.Anything).Once().Return(nil) 110 | } else { 111 | mrfs.On("EnsureNotPresentRedisService", rf).Once().Return(nil) 112 | } 113 | 114 | if !test.bootstrapping || test.bootstrappingAllowSentinels { 115 | mrfs.On("EnsureSentinelService", rf, mock.Anything, mock.Anything).Once().Return(nil) 116 | mrfs.On("EnsureSentinelConfigMap", rf, mock.Anything, mock.Anything).Once().Return(nil) 117 | mrfs.On("EnsureSentinelDeployment", rf, mock.Anything, mock.Anything).Once().Return(nil) 118 | } 119 | 120 | mrfs.On("EnsureRedisMasterService", rf, mock.Anything, mock.Anything).Once().Return(nil) 121 | mrfs.On("EnsureRedisSlaveService", rf, mock.Anything, mock.Anything).Once().Return(nil) 122 | mrfs.On("EnsureRedisConfigMap", rf, mock.Anything, mock.Anything).Once().Return(nil) 123 | mrfs.On("EnsureRedisShutdownConfigMap", rf, mock.Anything, mock.Anything).Once().Return(nil) 124 | mrfs.On("EnsureRedisReadinessConfigMap", rf, mock.Anything, mock.Anything).Once().Return(nil) 125 | mrfs.On("EnsureRedisStatefulset", rf, mock.Anything, mock.Anything).Once().Return(nil) 126 | 127 | // Create the handler and call the ensure logic.
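// Only mocks are wired in, so the test never talks to a real Kubernetes API.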
128 | handler := rfOperator.NewRedisFailoverHandler(config, mrfs, mrfc, mrfh, mk, metrics.Dummy, log.Dummy) 129 | err := handler.Ensure(rf, map[string]string{}, []metav1.OwnerReference{}, metrics.Dummy) 130 | 131 | assert.NoError(err) 132 | mrfs.AssertExpectations(t) 133 | }) 134 | } 135 | } 136 | -------------------------------------------------------------------------------- /service/k8s/pod.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | 7 | "k8s.io/apimachinery/pkg/types" 8 | 9 | corev1 "k8s.io/api/core/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/client-go/kubernetes" 13 | 14 | "github.com/spotahome/redis-operator/log" 15 | "github.com/spotahome/redis-operator/metrics" 16 | ) 17 | 18 | // Pod is the service that knows how to interact with k8s to manage Pods 19 | type Pod interface { 20 | GetPod(namespace string, name string) (*corev1.Pod, error) 21 | CreatePod(namespace string, pod *corev1.Pod) error 22 | UpdatePod(namespace string, pod *corev1.Pod) error 23 | CreateOrUpdatePod(namespace string, pod *corev1.Pod) error 24 | DeletePod(namespace string, name string) error 25 | ListPods(namespace string) (*corev1.PodList, error) 26 | UpdatePodLabels(namespace, podName string, labels map[string]string) error 27 | } 28 | 29 | // PodService is the pod service implementation using API calls to kubernetes. 30 | type PodService struct { 31 | kubeClient kubernetes.Interface 32 | logger log.Logger 33 | metricsRecorder metrics.Recorder 34 | } 35 | 36 | // NewPodService returns a new Pod KubeService. 37 | func NewPodService(kubeClient kubernetes.Interface, logger log.Logger, metricsRecorder metrics.Recorder) *PodService { 38 | logger = logger.With("service", "k8s.pod") 39 | return &PodService{ 40 | kubeClient: kubeClient, 41 | logger: logger, 42 | metricsRecorder: metricsRecorder, 43 | } 44 | } 45 | 46 | func (p *PodService) GetPod(namespace string, name string) (*corev1.Pod, error) { 47 | pod, err := p.kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 48 | recordMetrics(namespace, "Pod", name, "GET", err, p.metricsRecorder) 49 | if err != nil { 50 | return nil, err 51 | } 52 | return pod, err 53 | } 54 | 55 | func (p *PodService) CreatePod(namespace string, pod *corev1.Pod) error { 56 | _, err := p.kubeClient.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{}) 57 | recordMetrics(namespace, "Pod", pod.GetName(), "CREATE", err, p.metricsRecorder) 58 | if err != nil { 59 | return err 60 | } 61 | p.logger.WithField("namespace", namespace).WithField("pod", pod.Name).Debugf("pod created") 62 | return nil 63 | } 64 | func (p *PodService) UpdatePod(namespace string, pod *corev1.Pod) error { 65 | _, err := p.kubeClient.CoreV1().Pods(namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) 66 | recordMetrics(namespace, "Pod", pod.GetName(), "UPDATE", err, p.metricsRecorder) 67 | if err != nil { 68 | return err 69 | } 70 | p.logger.WithField("namespace", namespace).WithField("pod", pod.Name).Debugf("pod updated") 71 | return nil 72 | } 73 | func (p *PodService) CreateOrUpdatePod(namespace string, pod *corev1.Pod) error { 74 | storedPod, err := p.GetPod(namespace, pod.Name) 75 | if err != nil { 76 | // If no resource we need to create.
77 | if errors.IsNotFound(err) { 78 | return p.CreatePod(namespace, pod) 79 | } 80 | return err 81 | } 82 | 83 | // Already exists, need to Update. 84 | // Set the correct resource version to ensure we are on the latest version. This way the only valid 85 | // namespace is our spec(https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency), 86 | // we will replace the current namespace state. 87 | pod.ResourceVersion = storedPod.ResourceVersion 88 | return p.UpdatePod(namespace, pod) 89 | } 90 | 91 | func (p *PodService) DeletePod(namespace string, name string) error { 92 | err := p.kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) 93 | recordMetrics(namespace, "Pod", name, "DELETE", err, p.metricsRecorder) 94 | return err 95 | } 96 | 97 | func (p *PodService) ListPods(namespace string) (*corev1.PodList, error) { 98 | pods, err := p.kubeClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{}) 99 | recordMetrics(namespace, "Pod", metrics.NOT_APPLICABLE, "LIST", err, p.metricsRecorder) 100 | return pods, err 101 | } 102 | 103 | // PatchStringValue specifies a patch operation for a string. 104 | type PatchStringValue struct { 105 | Op string `json:"op"` 106 | Path string `json:"path"` 107 | Value interface{} `json:"value"` 108 | } 109 | 110 | func (p *PodService) UpdatePodLabels(namespace, podName string, labels map[string]string) error { 111 | p.logger.Infof("Update pod label, namespace: %s, pod name: %s, labels: %v", namespace, podName, labels) 112 | 113 | var payloads []interface{} 114 | for labelKey, labelValue := range labels { 115 | payload := PatchStringValue{ 116 | Op: "replace", 117 | Path: "/metadata/labels/" + labelKey, 118 | Value: labelValue, 119 | } 120 | payloads = append(payloads, payload) 121 | } 122 | payloadBytes, _ := json.Marshal(payloads) 123 | 124 | _, err := p.kubeClient.CoreV1().Pods(namespace).Patch(context.TODO(), podName, types.JSONPatchType, payloadBytes, metav1.PatchOptions{}) 125 | recordMetrics(namespace, "Pod", podName, "PATCH", err, p.metricsRecorder) 126 | if err != nil { 127 | p.logger.Errorf("Update pod labels failed, namespace: %s, pod name: %s, error: %v", namespace, podName, err) 128 | } 129 | return err 130 | } 131 | -------------------------------------------------------------------------------- /mocks/operator/redisfailover/service/RedisFailoverHeal.go: -------------------------------------------------------------------------------- 1 | // Code generated by mockery v2.20.0. DO NOT EDIT. 
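// (Regenerated via the Makefile's mocks target, which runs go generate ./mocks inside the development image.)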
2 | 3 | package mocks 4 | 5 | import ( 6 | mock "github.com/stretchr/testify/mock" 7 | 8 | v1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 9 | ) 10 | 11 | // RedisFailoverHeal is an autogenerated mock type for the RedisFailoverHeal type 12 | type RedisFailoverHeal struct { 13 | mock.Mock 14 | } 15 | 16 | // DeletePod provides a mock function with given fields: podName, rFailover 17 | func (_m *RedisFailoverHeal) DeletePod(podName string, rFailover *v1.RedisFailover) error { 18 | ret := _m.Called(podName, rFailover) 19 | 20 | var r0 error 21 | if rf, ok := ret.Get(0).(func(string, *v1.RedisFailover) error); ok { 22 | r0 = rf(podName, rFailover) 23 | } else { 24 | r0 = ret.Error(0) 25 | } 26 | 27 | return r0 28 | } 29 | 30 | // MakeMaster provides a mock function with given fields: ip, rFailover 31 | func (_m *RedisFailoverHeal) MakeMaster(ip string, rFailover *v1.RedisFailover) error { 32 | ret := _m.Called(ip, rFailover) 33 | 34 | var r0 error 35 | if rf, ok := ret.Get(0).(func(string, *v1.RedisFailover) error); ok { 36 | r0 = rf(ip, rFailover) 37 | } else { 38 | r0 = ret.Error(0) 39 | } 40 | 41 | return r0 42 | } 43 | 44 | // NewSentinelMonitor provides a mock function with given fields: ip, monitor, rFailover 45 | func (_m *RedisFailoverHeal) NewSentinelMonitor(ip string, monitor string, rFailover *v1.RedisFailover) error { 46 | ret := _m.Called(ip, monitor, rFailover) 47 | 48 | var r0 error 49 | if rf, ok := ret.Get(0).(func(string, string, *v1.RedisFailover) error); ok { 50 | r0 = rf(ip, monitor, rFailover) 51 | } else { 52 | r0 = ret.Error(0) 53 | } 54 | 55 | return r0 56 | } 57 | 58 | // NewSentinelMonitorWithPort provides a mock function with given fields: ip, monitor, port, rFailover 59 | func (_m *RedisFailoverHeal) NewSentinelMonitorWithPort(ip string, monitor string, port string, rFailover *v1.RedisFailover) error { 60 | ret := _m.Called(ip, monitor, port, rFailover) 61 | 62 | var r0 error 63 | if rf, ok := ret.Get(0).(func(string, string, string, *v1.RedisFailover) error); ok { 64 | r0 = rf(ip, monitor, port, rFailover) 65 | } else { 66 | r0 = ret.Error(0) 67 | } 68 | 69 | return r0 70 | } 71 | 72 | // RestoreSentinel provides a mock function with given fields: ip 73 | func (_m *RedisFailoverHeal) RestoreSentinel(ip string) error { 74 | ret := _m.Called(ip) 75 | 76 | var r0 error 77 | if rf, ok := ret.Get(0).(func(string) error); ok { 78 | r0 = rf(ip) 79 | } else { 80 | r0 = ret.Error(0) 81 | } 82 | 83 | return r0 84 | } 85 | 86 | // SetExternalMasterOnAll provides a mock function with given fields: masterIP, masterPort, rFailover 87 | func (_m *RedisFailoverHeal) SetExternalMasterOnAll(masterIP string, masterPort string, rFailover *v1.RedisFailover) error { 88 | ret := _m.Called(masterIP, masterPort, rFailover) 89 | 90 | var r0 error 91 | if rf, ok := ret.Get(0).(func(string, string, *v1.RedisFailover) error); ok { 92 | r0 = rf(masterIP, masterPort, rFailover) 93 | } else { 94 | r0 = ret.Error(0) 95 | } 96 | 97 | return r0 98 | } 99 | 100 | // SetMasterOnAll provides a mock function with given fields: masterIP, rFailover 101 | func (_m *RedisFailoverHeal) SetMasterOnAll(masterIP string, rFailover *v1.RedisFailover) error { 102 | ret := _m.Called(masterIP, rFailover) 103 | 104 | var r0 error 105 | if rf, ok := ret.Get(0).(func(string, *v1.RedisFailover) error); ok { 106 | r0 = rf(masterIP, rFailover) 107 | } else { 108 | r0 = ret.Error(0) 109 | } 110 | 111 | return r0 112 | } 113 | 114 | // SetOldestAsMaster provides a mock function with given fields: rFailover 
115 | func (_m *RedisFailoverHeal) SetOldestAsMaster(rFailover *v1.RedisFailover) error { 116 | ret := _m.Called(rFailover) 117 | 118 | var r0 error 119 | if rf, ok := ret.Get(0).(func(*v1.RedisFailover) error); ok { 120 | r0 = rf(rFailover) 121 | } else { 122 | r0 = ret.Error(0) 123 | } 124 | 125 | return r0 126 | } 127 | 128 | // SetRedisCustomConfig provides a mock function with given fields: ip, rFailover 129 | func (_m *RedisFailoverHeal) SetRedisCustomConfig(ip string, rFailover *v1.RedisFailover) error { 130 | ret := _m.Called(ip, rFailover) 131 | 132 | var r0 error 133 | if rf, ok := ret.Get(0).(func(string, *v1.RedisFailover) error); ok { 134 | r0 = rf(ip, rFailover) 135 | } else { 136 | r0 = ret.Error(0) 137 | } 138 | 139 | return r0 140 | } 141 | 142 | // SetSentinelCustomConfig provides a mock function with given fields: ip, rFailover 143 | func (_m *RedisFailoverHeal) SetSentinelCustomConfig(ip string, rFailover *v1.RedisFailover) error { 144 | ret := _m.Called(ip, rFailover) 145 | 146 | var r0 error 147 | if rf, ok := ret.Get(0).(func(string, *v1.RedisFailover) error); ok { 148 | r0 = rf(ip, rFailover) 149 | } else { 150 | r0 = ret.Error(0) 151 | } 152 | 153 | return r0 154 | } 155 | 156 | type mockConstructorTestingTNewRedisFailoverHeal interface { 157 | mock.TestingT 158 | Cleanup(func()) 159 | } 160 | 161 | // NewRedisFailoverHeal creates a new instance of RedisFailoverHeal. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 162 | func NewRedisFailoverHeal(t mockConstructorTestingTNewRedisFailoverHeal) *RedisFailoverHeal { 163 | mock := &RedisFailoverHeal{} 164 | mock.Mock.Test(t) 165 | 166 | t.Cleanup(func() { mock.AssertExpectations(t) }) 167 | 168 | return mock 169 | } 170 | -------------------------------------------------------------------------------- /service/k8s/deployment.go: -------------------------------------------------------------------------------- 1 | package k8s 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | appsv1 "k8s.io/api/apps/v1" 9 | corev1 "k8s.io/api/core/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/client-go/kubernetes" 13 | 14 | "github.com/spotahome/redis-operator/log" 15 | "github.com/spotahome/redis-operator/metrics" 16 | ) 17 | 18 | // Deployment is the service that knows how to interact with k8s to manage Deployments 19 | type Deployment interface { 20 | GetDeployment(namespace, name string) (*appsv1.Deployment, error) 21 | GetDeploymentPods(namespace, name string) (*corev1.PodList, error) 22 | CreateDeployment(namespace string, deployment *appsv1.Deployment) error 23 | UpdateDeployment(namespace string, deployment *appsv1.Deployment) error 24 | CreateOrUpdateDeployment(namespace string, deployment *appsv1.Deployment) error 25 | DeleteDeployment(namespace string, name string) error 26 | ListDeployments(namespace string) (*appsv1.DeploymentList, error) 27 | } 28 | 29 | // DeploymentService is the deployment service implementation using API calls to kubernetes. 30 | type DeploymentService struct { 31 | kubeClient kubernetes.Interface 32 | logger log.Logger 33 | metricsRecorder metrics.Recorder 34 | } 35 | 36 | // NewDeploymentService returns a new Deployment KubeService.
37 | func NewDeploymentService(kubeClient kubernetes.Interface, logger log.Logger, metricsRecorder metrics.Recorder) *DeploymentService { 38 | logger = logger.With("service", "k8s.deployment") 39 | return &DeploymentService{ 40 | kubeClient: kubeClient, 41 | logger: logger, 42 | metricsRecorder: metricsRecorder, 43 | } 44 | } 45 | 46 | // GetDeployment will retrieve the requested deployment based on namespace and name 47 | func (d *DeploymentService) GetDeployment(namespace, name string) (*appsv1.Deployment, error) { 48 | deployment, err := d.kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 49 | recordMetrics(namespace, "Deployment", name, "GET", err, d.metricsRecorder) 50 | if err != nil { 51 | return nil, err 52 | } 53 | return deployment, err 54 | } 55 | 56 | // GetDeploymentPods will retrieve the pods managed by a given deployment 57 | func (d *DeploymentService) GetDeploymentPods(namespace, name string) (*corev1.PodList, error) { 58 | deployment, err := d.kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 59 | recordMetrics(namespace, "Deployment", name, "GET", err, d.metricsRecorder) 60 | if err != nil { 61 | return nil, err 62 | } 63 | labels := []string{} 64 | for k, v := range deployment.Spec.Selector.MatchLabels { 65 | labels = append(labels, fmt.Sprintf("%s=%s", k, v)) 66 | } 67 | selector := strings.Join(labels, ",") 68 | return d.kubeClient.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector}) 69 | } 70 | 71 | // CreateDeployment will create the given deployment 72 | func (d *DeploymentService) CreateDeployment(namespace string, deployment *appsv1.Deployment) error { 73 | _, err := d.kubeClient.AppsV1().Deployments(namespace).Create(context.TODO(), deployment, metav1.CreateOptions{}) 74 | recordMetrics(namespace, "Deployment", deployment.GetName(), "CREATE", err, d.metricsRecorder) 75 | if err != nil { 76 | return err 77 | } 78 | d.logger.WithField("namespace", namespace).WithField("deployment", deployment.ObjectMeta.Name).Debugf("deployment created") 79 | return err 80 | } 81 | 82 | // UpdateDeployment will update the given deployment 83 | func (d *DeploymentService) UpdateDeployment(namespace string, deployment *appsv1.Deployment) error { 84 | _, err := d.kubeClient.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}) 85 | recordMetrics(namespace, "Deployment", deployment.GetName(), "UPDATE", err, d.metricsRecorder) 86 | if err != nil { 87 | return err 88 | } 89 | d.logger.WithField("namespace", namespace).WithField("deployment", deployment.ObjectMeta.Name).Debugf("deployment updated") 90 | return err 91 | } 92 | 93 | // CreateOrUpdateDeployment will update the given deployment or create it if does not exist 94 | func (d *DeploymentService) CreateOrUpdateDeployment(namespace string, deployment *appsv1.Deployment) error { 95 | storedDeployment, err := d.GetDeployment(namespace, deployment.Name) 96 | if err != nil { 97 | // If no resource we need to create. 98 | if errors.IsNotFound(err) { 99 | return d.CreateDeployment(namespace, deployment) 100 | } 101 | return err 102 | } 103 | 104 | // Already exists, need to Update. 105 | // Set the correct resource version to ensure we are on the latest version. 
This way the only valid 106 | // namespace is our spec(https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency), 107 | // we will replace the current namespace state. 108 | deployment.ResourceVersion = storedDeployment.ResourceVersion 109 | return d.UpdateDeployment(namespace, deployment) 110 | } 111 | 112 | // DeleteDeployment will delete the given deployment 113 | func (d *DeploymentService) DeleteDeployment(namespace, name string) error { 114 | propagation := metav1.DeletePropagationForeground 115 | err := d.kubeClient.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &propagation}) 116 | recordMetrics(namespace, "Deployment", name, "DELETE", err, d.metricsRecorder) 117 | return err 118 | } 119 | 120 | // ListDeployments will give all the deployments on a given namespace 121 | func (d *DeploymentService) ListDeployments(namespace string) (*appsv1.DeploymentList, error) { 122 | deployments, err := d.kubeClient.AppsV1().Deployments(namespace).List(context.TODO(), metav1.ListOptions{}) 123 | recordMetrics(namespace, "Deployment", metrics.NOT_APPLICABLE, "LIST", err, d.metricsRecorder) 124 | return deployments, err 125 | } 126 | -------------------------------------------------------------------------------- /client/k8s/clientset/versioned/typed/redisfailover/v1/redisfailover.go: -------------------------------------------------------------------------------- 1 | // Code generated by client-gen. DO NOT EDIT. 2 | 3 | package v1 4 | 5 | import ( 6 | "context" 7 | "time" 8 | 9 | v1 "github.com/spotahome/redis-operator/api/redisfailover/v1" 10 | scheme "github.com/spotahome/redis-operator/client/k8s/clientset/versioned/scheme" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | types "k8s.io/apimachinery/pkg/types" 13 | watch "k8s.io/apimachinery/pkg/watch" 14 | rest "k8s.io/client-go/rest" 15 | ) 16 | 17 | // RedisFailoversGetter has a method to return a RedisFailoverInterface. 18 | // A group's client should implement this interface. 19 | type RedisFailoversGetter interface { 20 | RedisFailovers(namespace string) RedisFailoverInterface 21 | } 22 | 23 | // RedisFailoverInterface has methods to work with RedisFailover resources. 
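// It covers the full typed-client surface generated by client-gen: Create, Update, Delete, DeleteCollection, Get, List, Watch and Patch, plus the RedisFailoverExpansion hooks.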
24 | type RedisFailoverInterface interface { 25 | Create(ctx context.Context, redisFailover *v1.RedisFailover, opts metav1.CreateOptions) (*v1.RedisFailover, error) 26 | Update(ctx context.Context, redisFailover *v1.RedisFailover, opts metav1.UpdateOptions) (*v1.RedisFailover, error) 27 | Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error 28 | DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error 29 | Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.RedisFailover, error) 30 | List(ctx context.Context, opts metav1.ListOptions) (*v1.RedisFailoverList, error) 31 | Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) 32 | Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RedisFailover, err error) 33 | RedisFailoverExpansion 34 | } 35 | 36 | // redisFailovers implements RedisFailoverInterface 37 | type redisFailovers struct { 38 | client rest.Interface 39 | ns string 40 | } 41 | 42 | // newRedisFailovers returns a RedisFailovers 43 | func newRedisFailovers(c *DatabasesV1Client, namespace string) *redisFailovers { 44 | return &redisFailovers{ 45 | client: c.RESTClient(), 46 | ns: namespace, 47 | } 48 | } 49 | 50 | // Get takes name of the redisFailover, and returns the corresponding redisFailover object, and an error if there is any. 51 | func (c *redisFailovers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.RedisFailover, err error) { 52 | result = &v1.RedisFailover{} 53 | err = c.client.Get(). 54 | Namespace(c.ns). 55 | Resource("redisfailovers"). 56 | Name(name). 57 | VersionedParams(&options, scheme.ParameterCodec). 58 | Do(ctx). 59 | Into(result) 60 | return 61 | } 62 | 63 | // List takes label and field selectors, and returns the list of RedisFailovers that match those selectors. 64 | func (c *redisFailovers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.RedisFailoverList, err error) { 65 | var timeout time.Duration 66 | if opts.TimeoutSeconds != nil { 67 | timeout = time.Duration(*opts.TimeoutSeconds) * time.Second 68 | } 69 | result = &v1.RedisFailoverList{} 70 | err = c.client.Get(). 71 | Namespace(c.ns). 72 | Resource("redisfailovers"). 73 | VersionedParams(&opts, scheme.ParameterCodec). 74 | Timeout(timeout). 75 | Do(ctx). 76 | Into(result) 77 | return 78 | } 79 | 80 | // Watch returns a watch.Interface that watches the requested redisFailovers. 81 | func (c *redisFailovers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { 82 | var timeout time.Duration 83 | if opts.TimeoutSeconds != nil { 84 | timeout = time.Duration(*opts.TimeoutSeconds) * time.Second 85 | } 86 | opts.Watch = true 87 | return c.client.Get(). 88 | Namespace(c.ns). 89 | Resource("redisfailovers"). 90 | VersionedParams(&opts, scheme.ParameterCodec). 91 | Timeout(timeout). 92 | Watch(ctx) 93 | } 94 | 95 | // Create takes the representation of a redisFailover and creates it. Returns the server's representation of the redisFailover, and an error, if there is any. 96 | func (c *redisFailovers) Create(ctx context.Context, redisFailover *v1.RedisFailover, opts metav1.CreateOptions) (result *v1.RedisFailover, err error) { 97 | result = &v1.RedisFailover{} 98 | err = c.client.Post(). 99 | Namespace(c.ns). 100 | Resource("redisfailovers"). 101 | VersionedParams(&opts, scheme.ParameterCodec). 102 | Body(redisFailover). 103 | Do(ctx). 
104 | Into(result) 105 | return 106 | } 107 | 108 | // Update takes the representation of a redisFailover and updates it. Returns the server's representation of the redisFailover, and an error, if there is any. 109 | func (c *redisFailovers) Update(ctx context.Context, redisFailover *v1.RedisFailover, opts metav1.UpdateOptions) (result *v1.RedisFailover, err error) { 110 | result = &v1.RedisFailover{} 111 | err = c.client.Put(). 112 | Namespace(c.ns). 113 | Resource("redisfailovers"). 114 | Name(redisFailover.Name). 115 | VersionedParams(&opts, scheme.ParameterCodec). 116 | Body(redisFailover). 117 | Do(ctx). 118 | Into(result) 119 | return 120 | } 121 | 122 | // Delete takes name of the redisFailover and deletes it. Returns an error if one occurs. 123 | func (c *redisFailovers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { 124 | return c.client.Delete(). 125 | Namespace(c.ns). 126 | Resource("redisfailovers"). 127 | Name(name). 128 | Body(&opts). 129 | Do(ctx). 130 | Error() 131 | } 132 | 133 | // DeleteCollection deletes a collection of objects. 134 | func (c *redisFailovers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { 135 | var timeout time.Duration 136 | if listOpts.TimeoutSeconds != nil { 137 | timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second 138 | } 139 | return c.client.Delete(). 140 | Namespace(c.ns). 141 | Resource("redisfailovers"). 142 | VersionedParams(&listOpts, scheme.ParameterCodec). 143 | Timeout(timeout). 144 | Body(&opts). 145 | Do(ctx). 146 | Error() 147 | } 148 | 149 | // Patch applies the patch and returns the patched redisFailover. 150 | func (c *redisFailovers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.RedisFailover, err error) { 151 | result = &v1.RedisFailover{} 152 | err = c.client.Patch(pt). 153 | Namespace(c.ns). 154 | Resource("redisfailovers"). 155 | Name(name). 156 | SubResource(subresources...). 157 | VersionedParams(&opts, scheme.ParameterCodec). 158 | Body(data). 159 | Do(ctx). 
160 | Into(result) 161 | return 162 | } 163 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | VERSION := v1.3.0-rc0 2 | 3 | # Name of this service/application 4 | SERVICE_NAME := redis-operator 5 | 6 | # Docker image name for this project 7 | IMAGE_NAME := spotahome/$(SERVICE_NAME) 8 | 9 | # Repository url for this project 10 | REPOSITORY := quay.io/$(IMAGE_NAME) 11 | 12 | # Shell to use for running scripts 13 | SHELL := $(shell which bash) 14 | 15 | # Get docker path or an empty string 16 | DOCKER := $(shell command -v docker) 17 | 18 | # Get the main unix group for the user running make (to be used by docker-compose later) 19 | GID := $(shell id -g) 20 | 21 | # Get the unix user id for the user running make (to be used by docker-compose later) 22 | UID := $(shell id -u) 23 | 24 | # Commit hash from git 25 | COMMIT=$(shell git rev-parse HEAD) 26 | GITTAG_COMMIT := $(shell git rev-list --tags --max-count=1) 27 | GITTAG := $(shell git describe --abbrev=0 --tags ${GITTAG_COMMIT} 2>/dev/null || true) 28 | 29 | # Branch from git 30 | BRANCH=$(shell git rev-parse --abbrev-ref HEAD) 31 | 32 | TAG := $(GITTAG) 33 | ifneq ($(COMMIT), $(GITTAG_COMMIT)) 34 | TAG := $(COMMIT) 35 | endif 36 | 37 | ifneq ($(shell git status --porcelain),) 38 | TAG := $(TAG)-dirty 39 | endif 40 | 41 | 42 | PROJECT_PACKAGE := github.com/spotahome/redis-operator 43 | CODEGEN_IMAGE := ghcr.io/slok/kube-code-generator:v1.27.0 44 | PORT := 9710 45 | 46 | # CMDs 47 | UNIT_TEST_CMD := go test `go list ./... | grep -v /vendor/` -v 48 | GO_GENERATE_CMD := go generate `go list ./... | grep -v /vendor/` 49 | GO_INTEGRATION_TEST_CMD := go test `go list ./... | grep test/integration` -v -tags='integration' 50 | GET_DEPS_CMD := dep ensure 51 | UPDATE_DEPS_CMD := dep ensure 52 | MOCKS_CMD := go generate ./mocks 53 | 54 | # environment dirs 55 | DEV_DIR := docker/development 56 | APP_DIR := docker/app 57 | 58 | # workdir 59 | WORKDIR := /go/src/github.com/spotahome/redis-operator 60 | 61 | # The default action of this Makefile is to build the development docker image 62 | .PHONY: default 63 | default: build 64 | 65 | # Run the development environment in non-daemonized mode (foreground) 66 | .PHONY: docker-build 67 | docker-build: deps-development 68 | docker build \ 69 | --build-arg uid=$(UID) \ 70 | -t $(REPOSITORY)-dev:latest \ 71 | -t $(REPOSITORY)-dev:$(COMMIT) \ 72 | -f $(DEV_DIR)/Dockerfile \ 73 | . 
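# The -dev image built above is reused by most targets below (shell, build, run, unit-test, go-generate, mocks), which declare docker-build as a prerequisite.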
74 | 75 | # Run a shell into the development docker image 76 | .PHONY: shell 77 | shell: docker-build 78 | docker run -ti --rm -v ~/.kube:/.kube:ro -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) -p $(PORT):$(PORT) $(REPOSITORY)-dev /bin/bash 79 | 80 | # Build redis-failover executable file 81 | .PHONY: build 82 | build: docker-build 83 | docker run -ti --rm -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) $(REPOSITORY)-dev ./scripts/build.sh 84 | 85 | # Run the development environment in the background 86 | .PHONY: run 87 | run: docker-build 88 | docker run -ti --rm -v ~/.kube:/.kube:ro -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) -p $(PORT):$(PORT) $(REPOSITORY)-dev ./scripts/run.sh 89 | 90 | # Build the production image based on the public one 91 | .PHONY: image 92 | image: deps-development 93 | docker build \ 94 | -t $(SERVICE_NAME) \ 95 | -t $(REPOSITORY):latest \ 96 | -t $(REPOSITORY):$(COMMIT) \ 97 | -t $(REPOSITORY):$(BRANCH) \ 98 | -f $(APP_DIR)/Dockerfile \ 99 | . 100 | 101 | .PHONY: image-release 102 | image-release: 103 | docker buildx build \ 104 | --platform linux/amd64,linux/arm64,linux/arm/v7 \ 105 | --push \ 106 | --build-arg VERSION=$(TAG) \ 107 | -t $(REPOSITORY):latest \ 108 | -t $(REPOSITORY):$(COMMIT) \ 109 | -t $(REPOSITORY):$(TAG) \ 110 | -f $(APP_DIR)/Dockerfile \ 111 | . 112 | 113 | .PHONY: testing 114 | testing: image 115 | docker push $(REPOSITORY):$(BRANCH) 116 | 117 | .PHONY: tag 118 | tag: 119 | git tag $(VERSION) 120 | 121 | .PHONY: publish 122 | publish: 123 | @COMMIT_VERSION="$$(git rev-list -n 1 $(VERSION))"; \ 124 | docker tag $(REPOSITORY):"$$COMMIT_VERSION" $(REPOSITORY):$(VERSION) 125 | docker push $(REPOSITORY):$(VERSION) 126 | docker push $(REPOSITORY):latest 127 | 128 | .PHONY: release 129 | release: tag image-release 130 | 131 | # Test stuff in dev 132 | .PHONY: unit-test 133 | unit-test: docker-build 134 | docker run -ti --rm -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) $(REPOSITORY)-dev /bin/sh -c '$(UNIT_TEST_CMD)' 135 | 136 | .PHONY: ci-unit-test 137 | ci-unit-test: 138 | $(UNIT_TEST_CMD) 139 | 140 | .PHONY: ci-integration-test 141 | ci-integration-test: 142 | $(GO_INTEGRATION_TEST_CMD) 143 | 144 | .PHONY: integration-test 145 | integration-test: 146 | ./scripts/integration-tests.sh 147 | 148 | .PHONY: helm-test 149 | helm-test: 150 | ./scripts/helm-tests.sh 151 | 152 | # Run all tests 153 | .PHONY: test 154 | test: ci-unit-test ci-integration-test helm-test 155 | 156 | .PHONY: go-generate 157 | go-generate: docker-build 158 | docker run -ti --rm -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) $(REPOSITORY)-dev /bin/sh -c '$(GO_GENERATE_CMD)' 159 | 160 | .PHONY: generate 161 | generate: go-generate 162 | 163 | .PHONY: get-deps 164 | get-deps: docker-build 165 | docker run -ti --rm -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) $(REPOSITORY)-dev /bin/sh -c '$(GET_DEPS_CMD)' 166 | 167 | .PHONY: update-deps 168 | update-deps: docker-build 169 | docker run -ti --rm -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) $(REPOSITORY)-dev /bin/sh -c '$(UPDATE_DEPS_CMD)' 170 | 171 | .PHONY: mocks 172 | mocks: docker-build 173 | docker run -ti --rm -v $(PWD):$(WORKDIR) -u $(UID):$(UID) --name $(SERVICE_NAME) $(REPOSITORY)-dev /bin/sh -c '$(MOCKS_CMD)' 174 | 175 | .PHONY: deps-development 176 | # Test if the dependencies we need to run this Makefile are installed 177 | deps-development: 178 | ifndef DOCKER 179 | @echo "Docker is not available. 
Please install docker" 180 | @exit 1 181 | endif 182 | 183 | # Generate kubernetes code for types.. 184 | .PHONY: update-codegen 185 | update-codegen: 186 | @echo ">> Generating code for Kubernetes CRD types..." 187 | docker run --rm -it \ 188 | -v $(PWD):/go/src/$(PROJECT_PACKAGE) \ 189 | -e PROJECT_PACKAGE=$(PROJECT_PACKAGE) \ 190 | -e CLIENT_GENERATOR_OUT=$(PROJECT_PACKAGE)/client/k8s \ 191 | -e APIS_ROOT=$(PROJECT_PACKAGE)/api \ 192 | -e GROUPS_VERSION="redisfailover:v1" \ 193 | -e GENERATION_TARGETS="deepcopy,client" \ 194 | $(CODEGEN_IMAGE) 195 | 196 | generate-crd: 197 | docker run -it --rm \ 198 | -v $(PWD):/go/src/$(PROJECT_PACKAGE) \ 199 | -e GO_PROJECT_ROOT=/go/src/$(PROJECT_PACKAGE) \ 200 | -e CRD_TYPES_PATH=/go/src/$(PROJECT_PACKAGE)/api \ 201 | -e CRD_OUT_PATH=/go/src/$(PROJECT_PACKAGE)/manifests \ 202 | $(CODEGEN_IMAGE) update-crd.sh 203 | cp -f manifests/databases.spotahome.com_redisfailovers.yaml manifests/kustomize/base 204 | --------------------------------------------------------------------------------
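For orientation, here is a minimal sketch of how one of the service/k8s wrappers above can be exercised against a fake clientset, mirroring the wiring used in service/k8s/poddisruptionbudget_test.go (client + log.Dummy + metrics.Dummy). The main package, the object name and the spec values below are illustrative assumptions, not code from the repository.

package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes/fake"

	"github.com/spotahome/redis-operator/log"
	"github.com/spotahome/redis-operator/metrics"
	"github.com/spotahome/redis-operator/service/k8s"
)

func main() {
	// A fake clientset records the API calls instead of talking to a real cluster.
	client := fake.NewSimpleClientset()

	// Same wiring as the unit tests: clientset + dummy logger + dummy metrics recorder.
	pdbService := k8s.NewPodDisruptionBudgetService(client, log.Dummy, metrics.Dummy)

	// Hypothetical PodDisruptionBudget; name and minAvailable are placeholders.
	minAvailable := intstr.FromInt(2)
	pdb := &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "rfr-redisfailover", Namespace: "default"},
		Spec:       policyv1.PodDisruptionBudgetSpec{MinAvailable: &minAvailable},
	}

	// First call creates the PDB (the Get comes back NotFound); the second call takes the update path.
	if err := pdbService.CreateOrUpdatePodDisruptionBudget("default", pdb); err != nil {
		fmt.Println("create failed:", err)
		return
	}
	if err := pdbService.CreateOrUpdatePodDisruptionBudget("default", pdb); err != nil {
		fmt.Println("update failed:", err)
		return
	}
	fmt.Println("PodDisruptionBudget ensured")
}

Because each constructor only asks for a kubernetes.Interface, a logger and a metrics recorder, the same wiring works unchanged against a real clientset built from a kubeconfig.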