├── .codeclimate.yml ├── test ├── configmap_source.yaml ├── secret_source.yaml ├── configmap_reference.yaml └── secret_reference.yaml ├── .github ├── dependabot.yml ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── workflows │ ├── release.yml │ └── build.yml ├── Dockerfile ├── SECURITY.md ├── .gitignore ├── deploy ├── helm-chart │ └── kubernetes-replicator │ │ ├── Chart.yaml │ │ ├── .helmignore │ │ ├── templates │ │ ├── verticalpodautoscaler.yaml │ │ ├── rbac.yaml │ │ ├── _helpers.tpl │ │ └── deployment.yaml │ │ └── values.yaml ├── rbac.yaml └── deployment.yaml ├── replicate ├── common │ ├── patch.go │ ├── exclude.go │ ├── consts.go │ ├── common.go │ ├── generic_sync_map.go │ ├── strings.go │ ├── namespaces.go │ └── generic-replicator.go ├── role │ ├── roles.go │ └── roles_test.go ├── serviceaccount │ └── serviceaccounts.go ├── rolebinding │ └── rolebindings.go ├── secret │ ├── secrets.go │ └── secrets_test.go └── configmap │ └── configmaps.go ├── .dockerignore ├── Dockerfile.buildx ├── config.go ├── liveness ├── handle.go └── handle_test.go ├── go.mod ├── main.go ├── .goreleaser.yml ├── README.md ├── LICENSE.txt └── go.sum /.codeclimate.yml: -------------------------------------------------------------------------------- 1 | exclude_patterns: 2 | - "**/*_test.go" -------------------------------------------------------------------------------- /test/configmap_source.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: config-source 5 | data: 6 | foo: bar -------------------------------------------------------------------------------- /test/secret_source.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: source-secret 5 | data: 6 | value: SGFsbG8gV2VsdA== -------------------------------------------------------------------------------- /.github/dependabot.yml: 
-------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: gomod 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | LABEL MAINTAINER="Martin Helmich " 3 | COPY kubernetes-replicator /replicator 4 | ENTRYPOINT ["/replicator"] -------------------------------------------------------------------------------- /test/configmap_reference.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: config-target 5 | annotations: 6 | replicator.v1.mittwald.de/replicate-from: default/config-source -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | In case you have discovered a vulnerability, please reach out privately via email to opensource@mittwald.de instead of opening an issue. 
6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Directory-based project format 2 | /.idea 3 | # File-based project format 4 | /*.iws 5 | /*.iml 6 | /kubernetes-replicator 7 | 8 | # Mock clases 9 | **/*_mock.go 10 | 11 | # Target output 12 | /replicator/target 13 | /dist 14 | -------------------------------------------------------------------------------- /deploy/helm-chart/kubernetes-replicator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kubernetes-replicator 3 | description: Controller for replicating secrets+configmaps across namespaces 4 | 5 | type: application 6 | 7 | version: 2.12.2 8 | 9 | appVersion: v2.12.2 10 | -------------------------------------------------------------------------------- /test/secret_reference.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: some-namespace 5 | --- 6 | apiVersion: v1 7 | kind: Secret 8 | metadata: 9 | name: target-secret 10 | namespace: some-namespace 11 | annotations: 12 | replicator.v1.mittwald.de/replicate-from: default/source-secret -------------------------------------------------------------------------------- /replicate/common/patch.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | // JSONPatchOperation is a struct that defines PATCH operations on 4 | // a JSON structure. 
5 | type JSONPatchOperation struct { 6 | Operation string `json:"op"` 7 | Path string `json:"path"` 8 | Value interface{} `json:"value,omitempty"` 9 | } 10 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### Go template 3 | # Binaries for programs and plugins 4 | *.exe 5 | *.dll 6 | *.so 7 | *.dylib 8 | 9 | # Test binary, build with `go test -c` 10 | *.test 11 | 12 | # Output of the go coverage tool, specifically when used with LiteIDE 13 | *.out 14 | 15 | # Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 16 | .glide/ 17 | 18 | Dockerfile 19 | deploy/ 20 | test/ -------------------------------------------------------------------------------- /Dockerfile.buildx: -------------------------------------------------------------------------------- 1 | FROM --platform=$BUILDPLATFORM golang:1.24 AS deps 2 | ARG TARGETPLATFORM 3 | ARG BUILDPLATFORM 4 | 5 | COPY . /src 6 | WORKDIR /src 7 | RUN go get -v ./... 8 | RUN go vet -v ./... 9 | RUN CGO_ENABLED=0 GO111MODULE=on go build 10 | 11 | FROM --platform=$TARGETPLATFORM scratch 12 | ARG TARGETPLATFORM 13 | LABEL MAINTAINER="Martin Helmich " 14 | 15 | COPY --from=build /src/ /kubernetes-replicator 16 | 17 | CMD ["/kubernetes-replicator"] 18 | -------------------------------------------------------------------------------- /deploy/helm-chart/kubernetes-replicator/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | .vscode/ 23 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "time" 4 | 5 | type flags struct { 6 | Kubeconfig string 7 | ResyncPeriodS string 8 | ResyncPeriod time.Duration 9 | StatusAddr string 10 | AllowAll bool 11 | LogLevel string 12 | LogFormat string 13 | ReplicateSecrets bool 14 | ReplicateConfigMaps bool 15 | ReplicateRoles bool 16 | ReplicateRoleBindings bool 17 | ReplicateServiceAccounts bool 18 | SyncByContent bool 19 | ExcludeNamespaces string 20 | } 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior. Please provide appropriate Kubernetes manifests for reproducing the behavior. 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Environment:** 20 | - Kubernetes version: [e.g. 1.18] 21 | - kubernetes-replicator version: [e.g. v0.2.2] 22 | 23 | **Additional context** 24 | Add any other context about the problem here. 
25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the solution you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context or screenshots about the feature request here. 21 | -------------------------------------------------------------------------------- /replicate/common/exclude.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "regexp" 5 | ) 6 | 7 | type NamespaceFilter struct { 8 | ExcludePatterns []string 9 | compiled []*regexp.Regexp 10 | } 11 | 12 | func NewNamespaceFilter(patterns []string) *NamespaceFilter { 13 | var compiled []*regexp.Regexp 14 | for _, pat := range patterns { 15 | if pat == "" { 16 | continue 17 | } 18 | re, err := regexp.Compile(pat) 19 | if err == nil { 20 | compiled = append(compiled, re) 21 | } 22 | } 23 | return &NamespaceFilter{ 24 | ExcludePatterns: patterns, 25 | compiled: compiled, 26 | } 27 | } 28 | 29 | func (f *NamespaceFilter) ShouldExclude(namespace string) bool { 30 | for _, re := range f.compiled { 31 | if re.MatchString(namespace) { 32 | return true 33 | } 34 | } 35 | return false 36 | } 37 | -------------------------------------------------------------------------------- /replicate/common/consts.go: 
-------------------------------------------------------------------------------- 1 | package common 2 | 3 | // Annotations that are used to control this Controller's behaviour 4 | const ( 5 | ReplicateFromAnnotation = "replicator.v1.mittwald.de/replicate-from" 6 | ReplicatedAtAnnotation = "replicator.v1.mittwald.de/replicated-at" 7 | ReplicatedFromVersionAnnotation = "replicator.v1.mittwald.de/replicated-from-version" 8 | ReplicatedKeysAnnotation = "replicator.v1.mittwald.de/replicated-keys" 9 | ReplicationAllowed = "replicator.v1.mittwald.de/replication-allowed" 10 | ReplicationAllowedNamespaces = "replicator.v1.mittwald.de/replication-allowed-namespaces" 11 | ReplicateTo = "replicator.v1.mittwald.de/replicate-to" 12 | ReplicateToMatching = "replicator.v1.mittwald.de/replicate-to-matching" 13 | KeepOwnerReferences = "replicator.v1.mittwald.de/keep-owner-references" 14 | StripLabels = "replicator.v1.mittwald.de/strip-labels" 15 | ) 16 | -------------------------------------------------------------------------------- /replicate/common/common.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | "strings" 7 | ) 8 | 9 | type Replicator interface { 10 | Run() 11 | Synced() bool 12 | NamespaceAdded(ns *v1.Namespace) 13 | } 14 | 15 | func PreviouslyPresentKeys(object *metav1.ObjectMeta) (map[string]struct{}, bool) { 16 | keyList, ok := object.Annotations[ReplicatedKeysAnnotation] 17 | if !ok { 18 | return nil, false 19 | } 20 | 21 | keys := strings.Split(keyList, ",") 22 | out := make(map[string]struct{}) 23 | 24 | for _, k := range keys { 25 | out[k] = struct{}{} 26 | } 27 | 28 | return out, true 29 | } 30 | 31 | func BuildStrictRegex(regex string) string { 32 | reg := strings.TrimSpace(regex) 33 | if !strings.HasPrefix(reg, "^") { 34 | reg = "^" + reg 35 | } 36 | if !strings.HasSuffix(reg, "$") { 37 | reg = reg + "$" 38 | 
} 39 | return reg 40 | } 41 | 42 | func JSONPatchPathEscape(annotation string) string { 43 | return strings.ReplaceAll(annotation, "/", "~1") 44 | } 45 | -------------------------------------------------------------------------------- /replicate/common/generic_sync_map.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "sync" 5 | ) 6 | 7 | // GenericMap is a generic sync.Map that can store any type of key and value. 8 | type GenericMap[K comparable, V any] struct { 9 | m sync.Map 10 | } 11 | 12 | func (gm *GenericMap[K, V]) Store(key K, value V) { 13 | gm.m.Store(key, value) 14 | } 15 | 16 | func (gm *GenericMap[K, V]) Load(key K) (value V, ok bool) { 17 | rawValue, ok := gm.m.Load(key) 18 | if ok { 19 | value = rawValue.(V) 20 | } 21 | return value, ok 22 | } 23 | 24 | func (gm *GenericMap[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) { 25 | rawActual, loaded := gm.m.LoadOrStore(key, value) 26 | if loaded { 27 | actual = rawActual.(V) 28 | } else { 29 | actual = value 30 | } 31 | return actual, loaded 32 | } 33 | 34 | func (gm *GenericMap[K, V]) Delete(key K) { 35 | gm.m.Delete(key) 36 | } 37 | 38 | func (gm *GenericMap[K, V]) Range(f func(key K, value V) bool) { 39 | gm.m.Range(func(rawKey, rawValue any) bool { 40 | key := rawKey.(K) 41 | value := rawValue.(V) 42 | return f(key, value) 43 | }) 44 | } 45 | -------------------------------------------------------------------------------- /deploy/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: replicator-kubernetes-replicator 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | name: replicator-kubernetes-replicator 11 | rules: 12 | - apiGroups: [ "" ] 13 | resources: [ "namespaces" ] 14 | verbs: [ "get", "watch", "list" ] 15 | - apiGroups: [""] # "" 
indicates the core API group 16 | resources: ["secrets", "configmaps", "serviceaccounts"] 17 | verbs: ["get", "watch", "list", "create", "update", "patch", "delete"] 18 | - apiGroups: ["rbac.authorization.k8s.io"] 19 | resources: ["roles", "rolebindings"] 20 | verbs: ["get", "watch", "list", "create", "update", "patch", "delete"] 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: ClusterRoleBinding 24 | metadata: 25 | name: replicator-kubernetes-replicator 26 | roleRef: 27 | kind: ClusterRole 28 | name: replicator-kubernetes-replicator 29 | apiGroup: rbac.authorization.k8s.io 30 | subjects: 31 | - kind: ServiceAccount 32 | name: replicator-kubernetes-replicator 33 | namespace: kube-system 34 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - '*' 7 | 8 | jobs: 9 | build: 10 | name: Build and release image 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - run: docker login -u "${{ secrets.QUAY_IO_USER }}" -p "${{ secrets.QUAY_IO_TOKEN }}" quay.io 16 | 17 | - name: Set up Go 18 | uses: actions/setup-go@v2 19 | with: 20 | go-version: "1.24" 21 | 22 | - name: Run GoReleaser 23 | uses: goreleaser/goreleaser-action@v2 24 | with: 25 | version: "0.181.1" 26 | args: release --rm-dist 27 | env: 28 | GITHUB_TOKEN: ${{ secrets.RELEASE_USER_TOKEN }} 29 | 30 | bump-version: 31 | name: Bump app version in Helm chart 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v2 35 | 36 | - name: Bump chart version 37 | uses: mittwald/bump-app-version-action@v1 38 | with: 39 | mode: 'publish' 40 | chartYaml: './deploy/helm-chart/kubernetes-replicator/Chart.yaml' 41 | env: 42 | GITHUB_TOKEN: "${{ secrets.RELEASE_USER_TOKEN }}" 43 | HELM_REPO_PASSWORD: "${{ secrets.HELM_REPO_PASSWORD }}" 44 | 
-------------------------------------------------------------------------------- /deploy/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: replicator-kubernetes-replicator 5 | namespace: kube-system 6 | labels: 7 | app.kubernetes.io/name: kubernetes-replicator 8 | app.kubernetes.io/instance: replicator 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/name: kubernetes-replicator 14 | app.kubernetes.io/instance: replicator 15 | template: 16 | metadata: 17 | labels: 18 | app.kubernetes.io/name: kubernetes-replicator 19 | app.kubernetes.io/instance: replicator 20 | spec: 21 | serviceAccountName: replicator-kubernetes-replicator 22 | securityContext: {} 23 | containers: 24 | - name: kubernetes-replicator 25 | securityContext: {} 26 | image: quay.io/mittwald/kubernetes-replicator:latest 27 | imagePullPolicy: Always 28 | args: [] 29 | ports: 30 | - name: health 31 | containerPort: 9102 32 | protocol: TCP 33 | livenessProbe: 34 | httpGet: 35 | path: /healthz 36 | port: health 37 | readinessProbe: 38 | httpGet: 39 | path: /readyz 40 | port: health 41 | resources: {} 42 | -------------------------------------------------------------------------------- /liveness/handle.go: -------------------------------------------------------------------------------- 1 | package liveness 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "github.com/mittwald/kubernetes-replicator/replicate/common" 7 | "net/http" 8 | ) 9 | 10 | type response struct { 11 | NotReady []string `json:"notReady"` 12 | } 13 | 14 | // Handler implements a HTTP response handler that reports on the current 15 | // liveness status of the controller 16 | type Handler struct { 17 | Replicators []common.Replicator 18 | } 19 | 20 | func (h *Handler) notReadyComponents() []string { 21 | notReady := make([]string, 0) 22 | 23 | for i := range h.Replicators { 24 | synced := 
h.Replicators[i].Synced() 25 | 26 | if !synced { 27 | notReady = append(notReady, fmt.Sprintf("%T", h.Replicators[i])) 28 | } 29 | } 30 | 31 | return notReady 32 | } 33 | 34 | //noinspection GoUnusedParameter 35 | func (h *Handler) ServeHTTP(res http.ResponseWriter, req *http.Request) { 36 | if req.URL.Path == "/healthz" { 37 | res.WriteHeader(http.StatusOK) 38 | } else { 39 | r := response{ 40 | NotReady: h.notReadyComponents(), 41 | } 42 | 43 | if len(r.NotReady) > 0 { 44 | res.WriteHeader(http.StatusServiceUnavailable) 45 | } else { 46 | res.WriteHeader(http.StatusOK) 47 | } 48 | enc := json.NewEncoder(res) 49 | _ = enc.Encode(&r) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /liveness/handle_test.go: -------------------------------------------------------------------------------- 1 | package liveness 2 | 3 | import ( 4 | "github.com/mittwald/kubernetes-replicator/replicate/common" 5 | v1 "k8s.io/api/core/v1" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | 10 | "github.com/stretchr/testify/assert" 11 | ) 12 | 13 | type MockReplicator struct { 14 | synced bool 15 | } 16 | 17 | func (r *MockReplicator) Run() { 18 | } 19 | 20 | func (r *MockReplicator) Synced() bool { 21 | return r.synced 22 | } 23 | 24 | //noinspection GoUnusedParameter 25 | func (r *MockReplicator) NamespaceAdded(ns *v1.Namespace) { 26 | // Do nothing 27 | } 28 | 29 | func buildReqRes(t *testing.T) (*http.Request, *httptest.ResponseRecorder) { 30 | req, err := http.NewRequest("GET", "/status", nil) 31 | res := httptest.NewRecorder() 32 | 33 | assert.Nil(t, err) 34 | return req, res 35 | } 36 | 37 | func TestReturns200IfAllReplicatorsAreSynced(t *testing.T) { 38 | req, res := buildReqRes(t) 39 | 40 | handler := Handler{ 41 | Replicators: []common.Replicator{ 42 | &MockReplicator{synced: true}, 43 | &MockReplicator{synced: true}, 44 | }, 45 | } 46 | 47 | handler.ServeHTTP(res, req) 48 | 49 | assert.Equal(t, http.StatusOK, res.Code) 50 | 
} 51 | 52 | func TestReturns503IfOneReplicatorIsNotSynced(t *testing.T) { 53 | req, res := buildReqRes(t) 54 | 55 | handler := Handler{ 56 | Replicators: []common.Replicator{ 57 | &MockReplicator{synced: true}, 58 | &MockReplicator{synced: false}, 59 | }, 60 | } 61 | 62 | handler.ServeHTTP(res, req) 63 | 64 | assert.Equal(t, http.StatusServiceUnavailable, res.Code) 65 | } 66 | -------------------------------------------------------------------------------- /deploy/helm-chart/kubernetes-replicator/templates/verticalpodautoscaler.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1") .Values.verticalPodAutoscaler.enabled }} 2 | apiVersion: autoscaling.k8s.io/v1 3 | kind: VerticalPodAutoscaler 4 | metadata: 5 | name: {{ include "kubernetes-replicator.fullname" . }} 6 | namespace: {{ include "kubernetes-replicator.namespace" . }} 7 | labels: 8 | {{- include "kubernetes-replicator.labels" . | nindent 4 }} 9 | spec: 10 | {{- with .Values.verticalPodAutoscaler.recommenders }} 11 | recommenders: 12 | {{- toYaml . | nindent 4 }} 13 | {{- end }} 14 | resourcePolicy: 15 | containerPolicies: 16 | - containerName: {{ .Chart.Name }} 17 | {{- with .Values.verticalPodAutoscaler.controlledResources }} 18 | controlledResources: 19 | {{- toYaml . | nindent 8 }} 20 | {{- end }} 21 | {{- if .Values.verticalPodAutoscaler.controlledValues }} 22 | controlledValues: {{ .Values.verticalPodAutoscaler.controlledValues }} 23 | {{- end }} 24 | {{- if .Values.verticalPodAutoscaler.maxAllowed }} 25 | maxAllowed: 26 | {{ toYaml .Values.verticalPodAutoscaler.maxAllowed | nindent 8 }} 27 | {{- end }} 28 | {{- if .Values.verticalPodAutoscaler.minAllowed }} 29 | minAllowed: 30 | {{ toYaml .Values.verticalPodAutoscaler.minAllowed | nindent 8 }} 31 | {{- end }} 32 | targetRef: 33 | apiVersion: apps/v1 34 | kind: Deployment 35 | name: {{ include "kubernetes-replicator.fullname" . 
}} 36 | {{- with .Values.verticalPodAutoscaler.updatePolicy }} 37 | updatePolicy: 38 | {{- toYaml . | nindent 4 }} 39 | {{- end }} 40 | {{- end }} 41 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Compile & Test 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | jobs: 9 | verify_helm: 10 | name: Verify Helm chart 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | helm: [ '3.10.3' ] 15 | steps: 16 | - uses: actions/checkout@v2 17 | 18 | - name: Set up Helm 19 | run: | 20 | wget https://get.helm.sh/helm-v${{ matrix.helm }}-linux-amd64.tar.gz -O /tmp/helm.tar.gz 21 | tar xzf /tmp/helm.tar.gz -C /tmp --strip-components=1 22 | chmod +x /tmp/helm 23 | 24 | - name: Test template rendering 25 | run: /tmp/helm template ./deploy/helm-chart/kubernetes-replicator/. 26 | 27 | - name: Lint chart 28 | run: /tmp/helm lint ./deploy/helm-chart/kubernetes-replicator/ 29 | 30 | build: 31 | name: Compile 32 | runs-on: ubuntu-latest 33 | steps: 34 | - uses: actions/checkout@v2 35 | 36 | - name: Set up Go 37 | uses: actions/setup-go@v1 38 | with: 39 | go-version: "1.24" 40 | 41 | - name: Compile 42 | run: go build . 43 | 44 | - name: Go vet 45 | run: go vet ./... 46 | 47 | tests: 48 | name: Run tests suite 49 | runs-on: ubuntu-latest 50 | steps: 51 | - name: Set up Go 52 | uses: actions/setup-go@v1 53 | with: 54 | go-version: "1.24" 55 | 56 | - uses: actions/checkout@v2 57 | 58 | - name: Set up KIND 59 | run: | 60 | wget -O ./kind https://github.com/kubernetes-sigs/kind/releases/download/v0.17.0/kind-linux-amd64 61 | chmod +x ./kind 62 | 63 | - name: Start cluster 64 | run: | 65 | ./kind create cluster 66 | ./kind get kubeconfig > ./kind-kubeconfig 67 | 68 | - name: Run unit tests 69 | run: KUBECONFIG=$PWD/kind-kubeconfig go test ./... 
70 | -------------------------------------------------------------------------------- /replicate/common/strings.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | "regexp" 7 | "sort" 8 | "strings" 9 | 10 | "github.com/pkg/errors" 11 | log "github.com/sirupsen/logrus" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/client-go/tools/cache" 14 | ) 15 | 16 | func GetKeysFromBinaryMap(data map[string][]byte) []string { 17 | strings := make([]string, 0) 18 | for k := range data { 19 | strings = append(strings, k) 20 | } 21 | sort.Strings(strings) 22 | 23 | return strings 24 | } 25 | 26 | func GetKeysFromStringMap(data map[string]string) []string { 27 | strings := make([]string, 0) 28 | for k := range data { 29 | strings = append(strings, k) 30 | } 31 | sort.Strings(strings) 32 | 33 | return strings 34 | } 35 | 36 | // MustGetKey creates a key from Kubernetes resource in the format / 37 | func MustGetKey(obj interface{}) string { 38 | if obj == nil { 39 | return "" 40 | } 41 | 42 | o := MustGetObject(obj) 43 | return fmt.Sprintf("%s/%s", o.GetNamespace(), o.GetName()) 44 | 45 | } 46 | 47 | // MustGetObject casts the object into a Kubernetes `metav1.Object` 48 | func MustGetObject(obj interface{}) metav1.Object { 49 | if obj == nil { 50 | return nil 51 | } 52 | 53 | switch o := obj.(type) { 54 | case metav1.ObjectMetaAccessor: 55 | return o.GetObjectMeta() 56 | case metav1.Object: 57 | return o 58 | case cache.DeletedFinalStateUnknown: 59 | return MustGetObject(o.Obj) 60 | } 61 | 62 | panic(errors.Errorf("Unknown type: %v", reflect.TypeOf(obj))) 63 | } 64 | 65 | func StringToPatternList(list string) (result []*regexp.Regexp) { 66 | for _, s := range strings.Split(list, ",") { 67 | s = BuildStrictRegex(s) 68 | r, err := regexp.Compile(s) 69 | if err != nil { 70 | log.WithError(err).Errorf("Invalid regex '%s' in namespace string %s: %v", s, list, err) 71 | } else 
{ 72 | result = append(result, r) 73 | } 74 | } 75 | 76 | return 77 | } 78 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/mittwald/kubernetes-replicator 2 | 3 | go 1.24.9 4 | 5 | require ( 6 | github.com/hashicorp/go-multierror v1.1.1 7 | github.com/pkg/errors v0.9.1 8 | github.com/sirupsen/logrus v1.9.3 9 | github.com/stretchr/testify v1.10.0 10 | k8s.io/api v0.33.3 11 | k8s.io/apimachinery v0.33.3 12 | k8s.io/client-go v0.33.3 13 | ) 14 | 15 | require ( 16 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 17 | github.com/emicklei/go-restful/v3 v3.12.2 // indirect 18 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 19 | github.com/go-logr/logr v1.4.2 // indirect 20 | github.com/go-openapi/jsonpointer v0.21.1 // indirect 21 | github.com/go-openapi/jsonreference v0.21.0 // indirect 22 | github.com/go-openapi/swag v0.23.1 // indirect 23 | github.com/gogo/protobuf v1.3.2 // indirect 24 | github.com/google/gnostic-models v0.6.9 // indirect 25 | github.com/google/go-cmp v0.7.0 // indirect 26 | github.com/google/uuid v1.6.0 // indirect 27 | github.com/hashicorp/errwrap v1.1.0 // indirect 28 | github.com/josharian/intern v1.0.0 // indirect 29 | github.com/json-iterator/go v1.1.12 // indirect 30 | github.com/mailru/easyjson v0.9.0 // indirect 31 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 32 | github.com/modern-go/reflect2 v1.0.2 // indirect 33 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 34 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 35 | github.com/spf13/pflag v1.0.6 // indirect 36 | github.com/x448/float16 v0.8.4 // indirect 37 | golang.org/x/net v0.38.0 // indirect 38 | golang.org/x/oauth2 v0.28.0 // indirect 39 | golang.org/x/sys v0.31.0 // indirect 40 | golang.org/x/term v0.30.0 // 
indirect 41 | golang.org/x/text v0.23.0 // indirect 42 | golang.org/x/time v0.11.0 // indirect 43 | google.golang.org/protobuf v1.36.5 // indirect 44 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 45 | gopkg.in/inf.v0 v0.9.1 // indirect 46 | gopkg.in/yaml.v3 v3.0.1 // indirect 47 | k8s.io/klog/v2 v2.130.1 // indirect 48 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect 49 | k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect 50 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect 51 | sigs.k8s.io/randfill v1.0.0 // indirect 52 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect 53 | sigs.k8s.io/yaml v1.4.0 // indirect 54 | ) 55 | -------------------------------------------------------------------------------- /deploy/helm-chart/kubernetes-replicator/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "kubernetes-replicator.serviceAccountName" . }} 6 | namespace: {{ include "kubernetes-replicator.namespace" . }} 7 | labels: 8 | {{- include "kubernetes-replicator.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . | nindent 4 }} 12 | {{- end }} 13 | automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} 14 | --- 15 | kind: ClusterRole 16 | apiVersion: rbac.authorization.k8s.io/v1 17 | metadata: 18 | name: {{ include "kubernetes-replicator.fullname" . }} 19 | labels: 20 | {{- include "kubernetes-replicator.labels" . 
| nindent 4 }} 21 | rules: 22 | - apiGroups: 23 | - "" 24 | resources: 25 | - namespaces 26 | verbs: {{ .Values.namespacesPrivileges | toYaml | nindent 4 }} 27 | {{ with .Values.replicationEnabled }} 28 | {{- if or .secrets .configMaps .serviceAccounts }} 29 | - apiGroups: 30 | - "" 31 | resources: 32 | {{- if .secrets }} 33 | - secrets 34 | {{- end }} 35 | {{- if .configMaps }} 36 | - configmaps 37 | {{- end }} 38 | {{- if .serviceAccounts }} 39 | - serviceaccounts 40 | {{- end }} 41 | verbs: {{ .privileges | toYaml | nindent 4 }} 42 | {{- end }} 43 | {{- if or .roles .roleBindings }} 44 | - apiGroups: 45 | - rbac.authorization.k8s.io 46 | resources: 47 | {{- if .roles }} 48 | - roles 49 | {{- end }} 50 | {{- if .roleBindings }} 51 | - rolebindings 52 | {{- end }} 53 | verbs: {{ .rolesPrivileges | toYaml | nindent 4 }} 54 | {{- end }} 55 | {{- end }} 56 | {{- range .Values.serviceAccount.privileges }} 57 | - apiGroups: {{ .apiGroups | toYaml | nindent 4 }} 58 | resources: {{ .resources | toYaml | nindent 4 }} 59 | verbs: {{ .privileges | toYaml | nindent 4 }} 60 | {{- end }} 61 | --- 62 | kind: ClusterRoleBinding 63 | apiVersion: rbac.authorization.k8s.io/v1 64 | metadata: 65 | name: {{ include "kubernetes-replicator.fullname" . }} 66 | labels: 67 | {{- include "kubernetes-replicator.labels" . | nindent 4 }} 68 | roleRef: 69 | kind: ClusterRole 70 | name: {{ include "kubernetes-replicator.roleName" . }} 71 | apiGroup: rbac.authorization.k8s.io 72 | subjects: 73 | - kind: ServiceAccount 74 | name: {{ include "kubernetes-replicator.serviceAccountName" . }} 75 | namespace: {{ include "kubernetes-replicator.namespace" . }} 76 | {{- end -}} 77 | -------------------------------------------------------------------------------- /deploy/helm-chart/kubernetes-replicator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 
4 | */}} 5 | {{- define "kubernetes-replicator.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "kubernetes-replicator.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "kubernetes-replicator.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | 34 | {{/* 35 | Create namespace name using the value of the release object or custom override. 36 | */}} 37 | {{- define "kubernetes-replicator.namespace" -}} 38 | {{- default .Release.Namespace .Values.namespaceOverride -}} 39 | {{- end -}} 40 | 41 | {{/* 42 | Common labels 43 | */}} 44 | {{- define "kubernetes-replicator.labels" -}} 45 | helm.sh/chart: {{ include "kubernetes-replicator.chart" . }} 46 | {{ include "kubernetes-replicator.selectorLabels" . }} 47 | {{- if .Chart.AppVersion }} 48 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 49 | {{- end }} 50 | app.kubernetes.io/managed-by: {{ .Release.Service }} 51 | {{- end -}} 52 | 53 | {{/* 54 | Selector labels 55 | */}} 56 | {{- define "kubernetes-replicator.selectorLabels" -}} 57 | app.kubernetes.io/name: {{ include "kubernetes-replicator.name" . 
}} 58 | app.kubernetes.io/instance: {{ .Release.Name }} 59 | {{- end -}} 60 | 61 | {{/* 62 | Create the name of the service account to use 63 | */}} 64 | {{- define "kubernetes-replicator.serviceAccountName" -}} 65 | {{- if .Values.serviceAccount.create -}} 66 | {{ default (include "kubernetes-replicator.fullname" .) .Values.serviceAccount.name }} 67 | {{- else -}} 68 | {{ default "default" .Values.serviceAccount.name }} 69 | {{- end -}} 70 | {{- end -}} 71 | {{- define "kubernetes-replicator.roleName" -}} 72 | {{- if .Values.grantClusterAdmin -}} 73 | {{ "cluster-admin" }} 74 | {{- else -}} 75 | {{ (include "kubernetes-replicator.fullname" .) }} 76 | {{- end -}} 77 | {{- end -}} 78 | -------------------------------------------------------------------------------- /replicate/common/namespaces.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | log "github.com/sirupsen/logrus" 9 | v1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/util/wait" 13 | "k8s.io/apimachinery/pkg/watch" 14 | "k8s.io/client-go/kubernetes" 15 | "k8s.io/client-go/tools/cache" 16 | ) 17 | 18 | var namespaceWatcher NamespaceWatcher 19 | 20 | type AddFunc func(obj *v1.Namespace) 21 | 22 | type UpdateFunc func(old *v1.Namespace, new *v1.Namespace) 23 | 24 | type NamespaceWatcher struct { 25 | doOnce sync.Once 26 | 27 | NamespaceStore cache.Store 28 | NamespaceController cache.Controller 29 | 30 | AddFuncs []AddFunc 31 | UpdateFuncs []UpdateFunc 32 | } 33 | 34 | // create will create a new namespace if one does not already exist. If it does, it will do nothing. 
35 | func (nw *NamespaceWatcher) create(client kubernetes.Interface, resyncPeriod time.Duration) { 36 | nw.doOnce.Do(func() { 37 | namespaceAdded := func(obj interface{}) { 38 | namespace := obj.(*v1.Namespace) 39 | for _, addFunc := range nw.AddFuncs { 40 | go addFunc(namespace) 41 | } 42 | } 43 | 44 | namespaceUpdated := func(old interface{}, new interface{}) { 45 | nsOld := old.(*v1.Namespace) 46 | nsNew := new.(*v1.Namespace) 47 | for _, updateFunc := range nw.UpdateFuncs { 48 | go updateFunc(nsOld, nsNew) 49 | } 50 | } 51 | 52 | nw.NamespaceStore, nw.NamespaceController = cache.NewInformer( 53 | &cache.ListWatch{ 54 | ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { 55 | return client.CoreV1().Namespaces().List(context.TODO(), lo) 56 | }, 57 | WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { 58 | return client.CoreV1().Namespaces().Watch(context.TODO(), lo) 59 | }, 60 | }, 61 | &v1.Namespace{}, 62 | resyncPeriod, 63 | cache.ResourceEventHandlerFuncs{ 64 | AddFunc: namespaceAdded, 65 | UpdateFunc: namespaceUpdated, 66 | }, 67 | ) 68 | 69 | log.WithField("kind", "Namespace").Infof("running Namespace controller") 70 | go nw.NamespaceController.Run(wait.NeverStop) 71 | 72 | }) 73 | } 74 | 75 | // OnNamespaceAdded will add another method to a list of functions to be called when a new namespace is created 76 | func (nw *NamespaceWatcher) OnNamespaceAdded(client kubernetes.Interface, resyncPeriod time.Duration, addFunc AddFunc) { 77 | nw.create(client, resyncPeriod) 78 | nw.AddFuncs = append(nw.AddFuncs, addFunc) 79 | } 80 | 81 | // OnNamespaceUpdated will add another method to a list of functions to be called when a namespace is updated 82 | func (nw *NamespaceWatcher) OnNamespaceUpdated(client kubernetes.Interface, resyncPeriod time.Duration, updateFunc UpdateFunc) { 83 | nw.create(client, resyncPeriod) 84 | nw.UpdateFuncs = append(nw.UpdateFuncs, updateFunc) 85 | } 86 | 
-------------------------------------------------------------------------------- /deploy/helm-chart/kubernetes-replicator/values.yaml: -------------------------------------------------------------------------------- 1 | image: 2 | repository: quay.io/mittwald/kubernetes-replicator 3 | #tag: stable # if no tag is given, the chart's appVersion is used 4 | pullPolicy: Always 5 | imagePullSecrets: [] 6 | nameOverride: "" 7 | fullnameOverride: "" 8 | namespaceOverride: "" 9 | grantClusterAdmin: false 10 | automountServiceAccountToken: true 11 | # args: 12 | # - -resync-period=30m 13 | # - -allow-all=false 14 | namespacesPrivileges: 15 | - get 16 | - watch 17 | - list 18 | replicationEnabled: 19 | secrets: true 20 | configMaps: true 21 | roles: true 22 | roleBindings: true 23 | serviceAccounts: true 24 | privileges: 25 | - get 26 | - watch 27 | - list 28 | - create 29 | - update 30 | - patch 31 | - delete 32 | rolesPrivileges: 33 | - get 34 | - watch 35 | - list 36 | - create 37 | - update 38 | - patch 39 | - delete 40 | 41 | ## Deployment strategy / DaemonSet updateStrategy 42 | ## 43 | updateStrategy: {} 44 | # type: RollingUpdate 45 | # rollingUpdate: 46 | # maxUnavailable: 1 47 | 48 | serviceAccount: 49 | create: true 50 | annotations: {} 51 | name: 52 | privileges: [] 53 | automountServiceAccountToken: true 54 | # - apiGroups: [""] 55 | # resources: ["configmaps"] 56 | podSecurityContext: {} 57 | # fsGroup: 2000 58 | 59 | securityContext: {} 60 | # capabilities: 61 | # drop: 62 | # - ALL 63 | # readOnlyRootFilesystem: true 64 | # runAsNonRoot: true 65 | # runAsUser: 1000 66 | 67 | priorityClassName: "" 68 | 69 | resources: {} 70 | # limits: 71 | # cpu: 100m 72 | # memory: 128Mi 73 | # requests: 74 | # cpu: 100m 75 | # memory: 128Mi 76 | 77 | # The number of old history to retain to allow rollback. 
78 | revisionHistoryLimit: 10 79 | 80 | nodeSelector: {} 81 | 82 | tolerations: [] 83 | 84 | affinity: {} 85 | 86 | # Deployment annotations 87 | annotations: {} 88 | 89 | # Deployment labels 90 | labels: {} 91 | 92 | # Pod annotations 93 | podAnnotations: {} 94 | 95 | # Pod labels 96 | podLabels: {} 97 | 98 | livenessProbe: 99 | initialDelaySeconds: 60 100 | periodSeconds: 10 101 | timeoutSeconds: 1 102 | failureThreshold: 3 103 | successThreshold: 1 104 | 105 | readinessProbe: 106 | initialDelaySeconds: 60 107 | periodSeconds: 10 108 | timeoutSeconds: 1 109 | failureThreshold: 3 110 | successThreshold: 1 111 | 112 | # Enable vertical pod autoscaler 113 | verticalPodAutoscaler: 114 | enabled: false 115 | 116 | # Recommender responsible for generating recommendation for the object. 117 | # List should be empty (then the default recommender will generate the recommendation) 118 | # or contain exactly one recommender. 119 | # recommenders: 120 | # - name: custom-recommender-performance 121 | 122 | # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory 123 | controlledResources: [] 124 | # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. 125 | # controlledValues: RequestsAndLimits 126 | 127 | # Define the max allowed resources for the pod 128 | maxAllowed: {} 129 | # cpu: 100m 130 | # memory: 128Mi 131 | # Define the min allowed resources for the pod 132 | minAllowed: {} 133 | # cpu: 100m 134 | # memory: 128Mi 135 | 136 | updatePolicy: 137 | # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction 138 | # minReplicas: 1 139 | # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates 140 | # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". 
141 | updateMode: Auto 142 | -------------------------------------------------------------------------------- /deploy/helm-chart/kubernetes-replicator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ include "kubernetes-replicator.fullname" . }} 5 | namespace: {{ include "kubernetes-replicator.namespace" . }} 6 | labels: 7 | {{- include "kubernetes-replicator.labels" . | nindent 4 }} 8 | {{- if .Values.labels }} 9 | {{- toYaml .Values.labels | nindent 4 }} 10 | {{- end }} 11 | {{- with .Values.annotations }} 12 | annotations: 13 | {{- toYaml . | nindent 4 }} 14 | {{- end }} 15 | spec: 16 | replicas: 1 17 | revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} 18 | {{- with .Values.updateStrategy }} 19 | strategy: 20 | {{- toYaml . | nindent 4 }} 21 | {{- end }} 22 | selector: 23 | matchLabels: 24 | {{- include "kubernetes-replicator.selectorLabels" . | nindent 6 }} 25 | template: 26 | metadata: 27 | {{- with .Values.podAnnotations }} 28 | annotations: 29 | {{- toYaml . | nindent 8 }} 30 | {{- end }} 31 | labels: 32 | {{- include "kubernetes-replicator.selectorLabels" . | nindent 8 }} 33 | {{- with .Values.podLabels }} 34 | {{- toYaml . | nindent 8 }} 35 | {{- end }} 36 | spec: 37 | {{- with .Values.imagePullSecrets }} 38 | imagePullSecrets: 39 | {{- toYaml . | nindent 8 }} 40 | {{- end }} 41 | serviceAccountName: {{ include "kubernetes-replicator.serviceAccountName" . 
}} 42 | automountServiceAccountToken: {{ .Values.automountServiceAccountToken }} 43 | securityContext: 44 | {{- toYaml .Values.podSecurityContext | nindent 8 }} 45 | {{- if .Values.priorityClassName }} 46 | priorityClassName: {{ .Values.priorityClassName | quote }} 47 | {{- end }} 48 | containers: 49 | - name: {{ .Chart.Name }} 50 | securityContext: 51 | {{- toYaml .Values.securityContext | nindent 12 }} 52 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 53 | imagePullPolicy: {{ .Values.image.pullPolicy }} 54 | args: 55 | - -replicate-secrets={{ .Values.replicationEnabled.secrets }} 56 | - -replicate-configmaps={{ .Values.replicationEnabled.configMaps }} 57 | - -replicate-roles={{ .Values.replicationEnabled.roles }} 58 | - -replicate-role-bindings={{ .Values.replicationEnabled.roleBindings }} 59 | - -replicate-service-accounts={{ .Values.replicationEnabled.serviceAccounts }} 60 | {{- with .Values.args }} 61 | {{- toYaml . | nindent 12 }} 62 | {{- end }} 63 | ports: 64 | - name: health 65 | containerPort: 9102 66 | protocol: TCP 67 | livenessProbe: 68 | httpGet: 69 | path: /healthz 70 | port: health 71 | initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} 72 | periodSeconds: {{ .Values.livenessProbe.periodSeconds }} 73 | timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} 74 | successThreshold: {{ .Values.livenessProbe.successThreshold }} 75 | failureThreshold: {{ .Values.livenessProbe.failureThreshold }} 76 | readinessProbe: 77 | httpGet: 78 | path: /readyz 79 | port: health 80 | initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} 81 | periodSeconds: {{ .Values.readinessProbe.periodSeconds }} 82 | timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} 83 | successThreshold: {{ .Values.readinessProbe.successThreshold }} 84 | failureThreshold: {{ .Values.readinessProbe.failureThreshold }} 85 | resources: 86 | {{- toYaml .Values.resources | nindent 12 }} 87 | {{- with 
.Values.nodeSelector }} 88 | nodeSelector: 89 | {{- toYaml . | nindent 8 }} 90 | {{- end }} 91 | {{- with .Values.affinity }} 92 | affinity: 93 | {{- toYaml . | nindent 8 }} 94 | {{- end }} 95 | {{- with .Values.tolerations }} 96 | tolerations: 97 | {{- toYaml . | nindent 8 }} 98 | {{- end }} 99 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "net/http" 6 | "strings" 7 | "time" 8 | 9 | "github.com/mittwald/kubernetes-replicator/replicate/common" 10 | "github.com/mittwald/kubernetes-replicator/replicate/configmap" 11 | "github.com/mittwald/kubernetes-replicator/replicate/role" 12 | "github.com/mittwald/kubernetes-replicator/replicate/rolebinding" 13 | "github.com/mittwald/kubernetes-replicator/replicate/secret" 14 | "github.com/mittwald/kubernetes-replicator/replicate/serviceaccount" 15 | 16 | log "github.com/sirupsen/logrus" 17 | 18 | "github.com/mittwald/kubernetes-replicator/liveness" 19 | "k8s.io/client-go/kubernetes" 20 | "k8s.io/client-go/rest" 21 | "k8s.io/client-go/tools/clientcmd" 22 | ) 23 | 24 | var f flags 25 | 26 | func init() { 27 | var err error 28 | flag.StringVar(&f.Kubeconfig, "kubeconfig", "", "path to Kubernetes config file") 29 | flag.StringVar(&f.ResyncPeriodS, "resync-period", "30m", "resynchronization period") 30 | flag.StringVar(&f.StatusAddr, "status-addr", ":9102", "listen address for status and monitoring server") 31 | flag.StringVar(&f.LogLevel, "log-level", "info", "Log level (trace, debug, info, warn, error)") 32 | flag.StringVar(&f.LogFormat, "log-format", "plain", "Log format (plain, json)") 33 | flag.BoolVar(&f.AllowAll, "allow-all", false, "allow replication of all secrets (CAUTION: only use when you know what you're doing)") 34 | flag.BoolVar(&f.ReplicateSecrets, "replicate-secrets", true, "Enable replication of secrets") 35 | 
flag.BoolVar(&f.ReplicateConfigMaps, "replicate-configmaps", true, "Enable replication of config maps") 36 | flag.BoolVar(&f.ReplicateRoles, "replicate-roles", true, "Enable replication of roles") 37 | flag.BoolVar(&f.ReplicateRoleBindings, "replicate-role-bindings", true, "Enable replication of role bindings") 38 | flag.BoolVar(&f.ReplicateServiceAccounts, "replicate-service-accounts", true, "Enable replication of service accounts") 39 | flag.BoolVar(&f.SyncByContent, "sync-by-content", false, "Always compare the contents of source and target resources and force them to be the same") 40 | flag.StringVar(&f.ExcludeNamespaces, "exclude-namespaces", "", "Comma-separated list of regex patterns for namespaces to exclude from replication") 41 | flag.Parse() 42 | 43 | switch strings.ToUpper(strings.TrimSpace(f.LogLevel)) { 44 | case "TRACE": 45 | log.SetLevel(log.TraceLevel) 46 | case "DEBUG": 47 | log.SetLevel(log.DebugLevel) 48 | case "WARN", "WARNING": 49 | log.SetLevel(log.WarnLevel) 50 | case "ERROR": 51 | log.SetLevel(log.ErrorLevel) 52 | case "FATAL": 53 | log.SetLevel(log.FatalLevel) 54 | case "PANIC": 55 | log.SetLevel(log.PanicLevel) 56 | default: 57 | log.SetLevel(log.InfoLevel) 58 | } 59 | if strings.ToUpper(strings.TrimSpace(f.LogFormat)) == "JSON" { 60 | log.SetFormatter(&log.JSONFormatter{}) 61 | } 62 | 63 | f.ResyncPeriod, err = time.ParseDuration(f.ResyncPeriodS) 64 | if err != nil { 65 | panic(err) 66 | } 67 | 68 | log.Debugf("using flag values %#v", f) 69 | } 70 | 71 | func main() { 72 | 73 | var config *rest.Config 74 | var err error 75 | var client kubernetes.Interface 76 | var enabledReplicators []common.Replicator 77 | 78 | if f.Kubeconfig == "" { 79 | log.Info("using in-cluster configuration") 80 | config, err = rest.InClusterConfig() 81 | } else { 82 | log.Infof("using configuration from '%s'", f.Kubeconfig) 83 | config, err = clientcmd.BuildConfigFromFlags("", f.Kubeconfig) 84 | } 85 | 86 | if err != nil { 87 | panic(err) 88 | } 89 | 90 | client 
= kubernetes.NewForConfigOrDie(config) 91 | 92 | excludePatterns := strings.Split(f.ExcludeNamespaces, ",") 93 | filter := common.NewNamespaceFilter(excludePatterns) 94 | 95 | if f.ReplicateSecrets { 96 | secretRepl := secret.NewReplicator(client, f.ResyncPeriod, f.AllowAll, f.SyncByContent, filter) 97 | go secretRepl.Run() 98 | enabledReplicators = append(enabledReplicators, secretRepl) 99 | } 100 | 101 | if f.ReplicateConfigMaps { 102 | configMapRepl := configmap.NewReplicator(client, f.ResyncPeriod, f.AllowAll, f.SyncByContent, filter) 103 | go configMapRepl.Run() 104 | enabledReplicators = append(enabledReplicators, configMapRepl) 105 | } 106 | 107 | if f.ReplicateRoles { 108 | roleRepl := role.NewReplicator(client, f.ResyncPeriod, f.AllowAll) 109 | go roleRepl.Run() 110 | enabledReplicators = append(enabledReplicators, roleRepl) 111 | } 112 | 113 | if f.ReplicateRoleBindings { 114 | roleBindingRepl := rolebinding.NewReplicator(client, f.ResyncPeriod, f.AllowAll) 115 | go roleBindingRepl.Run() 116 | enabledReplicators = append(enabledReplicators, roleBindingRepl) 117 | } 118 | 119 | if f.ReplicateServiceAccounts { 120 | serviceAccountRepl := serviceaccount.NewReplicator(client, f.ResyncPeriod, f.AllowAll) 121 | go serviceAccountRepl.Run() 122 | enabledReplicators = append(enabledReplicators, serviceAccountRepl) 123 | } 124 | 125 | h := liveness.Handler{ 126 | Replicators: enabledReplicators, 127 | } 128 | 129 | log.Infof("starting liveness monitor at %s", f.StatusAddr) 130 | 131 | http.Handle("/healthz", &h) 132 | http.Handle("/readyz", &h) 133 | err = http.ListenAndServe(f.StatusAddr, nil) 134 | if err != nil { 135 | log.Fatal(err) 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /.goreleaser.yml: -------------------------------------------------------------------------------- 1 | builds: 2 | - 3 | env: 4 | - CGO_ENABLED=0 5 | - GO111MODULE=on 6 | goos: 7 | - linux 8 | goarch: 9 | - amd64 10 | - arm 11 | - 
arm64 12 | goarm: 13 | - 5 14 | - 6 15 | - 7 16 | checksum: 17 | name_template: 'checksums.txt' 18 | snapshot: 19 | name_template: "{{ .Tag }}-next" 20 | changelog: 21 | sort: asc 22 | filters: 23 | exclude: 24 | - '^docs:' 25 | - '^test:' 26 | dockers: 27 | - 28 | dockerfile: Dockerfile 29 | image_templates: 30 | - quay.io/mittwald/kubernetes-replicator-amd64:latest 31 | - quay.io/mittwald/kubernetes-replicator-amd64:stable 32 | - quay.io/mittwald/kubernetes-replicator-amd64:v{{ .Major }} 33 | - quay.io/mittwald/kubernetes-replicator-amd64:v{{ .Major }}.{{ .Minor }} 34 | - quay.io/mittwald/kubernetes-replicator-amd64:{{ .Tag }} 35 | ids: 36 | - kubernetes-replicator 37 | goos: linux 38 | goarch: amd64 39 | goarm: '' 40 | use: buildx 41 | build_flag_templates: 42 | - --platform=linux/amd64 43 | - 44 | dockerfile: Dockerfile 45 | image_templates: 46 | - quay.io/mittwald/kubernetes-replicator-arm64:latest 47 | - quay.io/mittwald/kubernetes-replicator-arm64:stable 48 | - quay.io/mittwald/kubernetes-replicator-arm64:v{{ .Major }} 49 | - quay.io/mittwald/kubernetes-replicator-arm64:v{{ .Major }}.{{ .Minor }} 50 | - quay.io/mittwald/kubernetes-replicator-arm64:{{ .Tag }} 51 | ids: 52 | - kubernetes-replicator 53 | goos: linux 54 | goarch: arm64 55 | goarm: '' 56 | use: buildx 57 | build_flag_templates: 58 | - --platform=linux/arm64 59 | - 60 | dockerfile: Dockerfile 61 | image_templates: 62 | - quay.io/mittwald/kubernetes-replicator-armv5:latest 63 | - quay.io/mittwald/kubernetes-replicator-armv5:stable 64 | - quay.io/mittwald/kubernetes-replicator-armv5:v{{ .Major }} 65 | - quay.io/mittwald/kubernetes-replicator-armv5:v{{ .Major }}.{{ .Minor }} 66 | - quay.io/mittwald/kubernetes-replicator-armv5:{{ .Tag }} 67 | ids: 68 | - kubernetes-replicator 69 | goos: linux 70 | goarch: arm 71 | goarm: '5' 72 | use: buildx 73 | build_flag_templates: 74 | - --platform=linux/arm/v5 75 | - 76 | dockerfile: Dockerfile 77 | image_templates: 78 | - 
quay.io/mittwald/kubernetes-replicator-armv6:latest 79 | - quay.io/mittwald/kubernetes-replicator-armv6:stable 80 | - quay.io/mittwald/kubernetes-replicator-armv6:v{{ .Major }} 81 | - quay.io/mittwald/kubernetes-replicator-armv6:v{{ .Major }}.{{ .Minor }} 82 | - quay.io/mittwald/kubernetes-replicator-armv6:{{ .Tag }} 83 | ids: 84 | - kubernetes-replicator 85 | goos: linux 86 | goarch: arm 87 | goarm: '6' 88 | use: buildx 89 | build_flag_templates: 90 | - --platform=linux/arm/v6 91 | - 92 | dockerfile: Dockerfile 93 | image_templates: 94 | - quay.io/mittwald/kubernetes-replicator-armv7:latest 95 | - quay.io/mittwald/kubernetes-replicator-armv7:stable 96 | - quay.io/mittwald/kubernetes-replicator-armv7:v{{ .Major }} 97 | - quay.io/mittwald/kubernetes-replicator-armv7:v{{ .Major }}.{{ .Minor }} 98 | - quay.io/mittwald/kubernetes-replicator-armv7:{{ .Tag }} 99 | ids: 100 | - kubernetes-replicator 101 | goos: linux 102 | goarch: arm 103 | goarm: '7' 104 | use: buildx 105 | build_flag_templates: 106 | - --platform=linux/arm/v7 107 | docker_manifests: 108 | - 109 | name_template: quay.io/mittwald/kubernetes-replicator:latest 110 | image_templates: 111 | - quay.io/mittwald/kubernetes-replicator-amd64:{{ .Tag }} 112 | - quay.io/mittwald/kubernetes-replicator-arm64:{{ .Tag }} 113 | - quay.io/mittwald/kubernetes-replicator-armv5:{{ .Tag }} 114 | - quay.io/mittwald/kubernetes-replicator-armv6:{{ .Tag }} 115 | - quay.io/mittwald/kubernetes-replicator-armv7:{{ .Tag }} 116 | - 117 | name_template: quay.io/mittwald/kubernetes-replicator:stable 118 | image_templates: 119 | - quay.io/mittwald/kubernetes-replicator-amd64:{{ .Tag }} 120 | - quay.io/mittwald/kubernetes-replicator-arm64:{{ .Tag }} 121 | - quay.io/mittwald/kubernetes-replicator-armv5:{{ .Tag }} 122 | - quay.io/mittwald/kubernetes-replicator-armv6:{{ .Tag }} 123 | - quay.io/mittwald/kubernetes-replicator-armv7:{{ .Tag }} 124 | - 125 | name_template: quay.io/mittwald/kubernetes-replicator:v{{ .Major }} 126 | 
image_templates: 127 | - quay.io/mittwald/kubernetes-replicator-amd64:{{ .Tag }} 128 | - quay.io/mittwald/kubernetes-replicator-arm64:{{ .Tag }} 129 | - quay.io/mittwald/kubernetes-replicator-armv5:{{ .Tag }} 130 | - quay.io/mittwald/kubernetes-replicator-armv6:{{ .Tag }} 131 | - quay.io/mittwald/kubernetes-replicator-armv7:{{ .Tag }} 132 | - 133 | name_template: quay.io/mittwald/kubernetes-replicator:v{{ .Major }}.{{ .Minor }} 134 | image_templates: 135 | - quay.io/mittwald/kubernetes-replicator-amd64:{{ .Tag }} 136 | - quay.io/mittwald/kubernetes-replicator-arm64:{{ .Tag }} 137 | - quay.io/mittwald/kubernetes-replicator-armv5:{{ .Tag }} 138 | - quay.io/mittwald/kubernetes-replicator-armv6:{{ .Tag }} 139 | - quay.io/mittwald/kubernetes-replicator-armv7:{{ .Tag }} 140 | - 141 | name_template: quay.io/mittwald/kubernetes-replicator:{{ .Tag }} 142 | image_templates: 143 | - quay.io/mittwald/kubernetes-replicator-amd64:{{ .Tag }} 144 | - quay.io/mittwald/kubernetes-replicator-arm64:{{ .Tag }} 145 | - quay.io/mittwald/kubernetes-replicator-armv5:{{ .Tag }} 146 | - quay.io/mittwald/kubernetes-replicator-armv6:{{ .Tag }} 147 | - quay.io/mittwald/kubernetes-replicator-armv7:{{ .Tag }} 148 | -------------------------------------------------------------------------------- /replicate/role/roles.go: -------------------------------------------------------------------------------- 1 | package role 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/mittwald/kubernetes-replicator/replicate/common" 10 | "github.com/pkg/errors" 11 | log "github.com/sirupsen/logrus" 12 | v1 "k8s.io/api/core/v1" 13 | 14 | rbacv1 "k8s.io/api/rbac/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/runtime" 17 | "k8s.io/apimachinery/pkg/types" 18 | "k8s.io/apimachinery/pkg/watch" 19 | "k8s.io/client-go/kubernetes" 20 | ) 21 | 22 | type Replicator struct { 23 | *common.GenericReplicator 24 | } 25 | 26 | // NewReplicator creates a 
new role replicator 27 | func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) common.Replicator { 28 | repl := Replicator{ 29 | GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{ 30 | Kind: "Role", 31 | ObjType: &rbacv1.Role{}, 32 | AllowAll: allowAll, 33 | ResyncPeriod: resyncPeriod, 34 | Client: client, 35 | ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { 36 | return client.RbacV1().Roles("").List(context.TODO(), lo) 37 | }, 38 | WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { 39 | return client.RbacV1().Roles("").Watch(context.TODO(), lo) 40 | }, 41 | }), 42 | } 43 | repl.UpdateFuncs = common.UpdateFuncs{ 44 | ReplicateDataFrom: repl.ReplicateDataFrom, 45 | ReplicateObjectTo: repl.ReplicateObjectTo, 46 | PatchDeleteDependent: repl.PatchDeleteDependent, 47 | DeleteReplicatedResource: repl.DeleteReplicatedResource, 48 | } 49 | 50 | return &repl 51 | } 52 | 53 | func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error { 54 | source := sourceObj.(*rbacv1.Role) 55 | target := targetObj.(*rbacv1.Role) 56 | 57 | logger := log. 58 | WithField("kind", r.Kind). 59 | WithField("source", common.MustGetKey(source)). 
60 | WithField("target", common.MustGetKey(target)) 61 | 62 | // make sure replication is allowed 63 | if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok { 64 | return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source)) 65 | } 66 | 67 | targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation] 68 | sourceVersion := source.ResourceVersion 69 | 70 | if ok && targetVersion == sourceVersion { 71 | logger.Debugf("target %s is already up-to-date", common.MustGetKey(target)) 72 | return nil 73 | } 74 | 75 | targetCopy := target.DeepCopy() 76 | targetCopy.Rules = source.Rules 77 | 78 | logger.Infof("updating target %s/%s", target.Namespace, target.Name) 79 | 80 | targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 81 | targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 82 | 83 | s, err := r.Client.RbacV1().Roles(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{}) 84 | if err != nil { 85 | err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name) 86 | } else if err = r.Store.Update(s); err != nil { 87 | err = errors.Wrapf(err, "Failed to update cache for %s/%s: %v", target.Namespace, targetCopy, err) 88 | } 89 | 90 | return err 91 | } 92 | 93 | // ReplicateObjectTo copies the whole object to target namespace 94 | func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error { 95 | source := sourceObj.(*rbacv1.Role) 96 | targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name) 97 | 98 | logger := log. 99 | WithField("kind", r.Kind). 100 | WithField("source", common.MustGetKey(source)). 
101 | WithField("target", targetLocation) 102 | 103 | targetResource, exists, err := r.Store.GetByKey(targetLocation) 104 | if err != nil { 105 | return errors.Wrapf(err, "Could not get %s from cache!", targetLocation) 106 | } 107 | logger.Infof("Checking if %s exists? %v", targetLocation, exists) 108 | 109 | var targetCopy *rbacv1.Role 110 | if exists { 111 | targetObject := targetResource.(*rbacv1.Role) 112 | targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation] 113 | sourceVersion := source.ResourceVersion 114 | 115 | if ok && targetVersion == sourceVersion { 116 | logger.Debugf("Role %s is already up-to-date", common.MustGetKey(targetObject)) 117 | return nil 118 | } 119 | 120 | targetCopy = targetObject.DeepCopy() 121 | } else { 122 | targetCopy = new(rbacv1.Role) 123 | } 124 | 125 | keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences] 126 | if ok && keepOwnerReferences == "true" { 127 | targetCopy.OwnerReferences = source.OwnerReferences 128 | } 129 | 130 | if targetCopy.Rules == nil { 131 | targetCopy.Rules = make([]rbacv1.PolicyRule, 0) 132 | } 133 | if targetCopy.Annotations == nil { 134 | targetCopy.Annotations = make(map[string]string) 135 | } 136 | 137 | labelsCopy := make(map[string]string) 138 | 139 | stripLabels, ok := source.Annotations[common.StripLabels] 140 | if !ok && stripLabels != "true" { 141 | if source.Labels != nil { 142 | for key, value := range source.Labels { 143 | labelsCopy[key] = value 144 | } 145 | } 146 | } 147 | 148 | targetCopy.Name = source.Name 149 | targetCopy.Labels = labelsCopy 150 | targetCopy.Rules = source.Rules 151 | targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 152 | targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 153 | 154 | var obj interface{} 155 | if exists { 156 | logger.Debugf("Updating existing role %s/%s", target.Name, targetCopy.Name) 157 | obj, err = 
r.Client.RbacV1().Roles(target.Name).Update(context.TODO(), targetCopy, metav1.UpdateOptions{})
	} else {
		logger.Debugf("Creating a new role %s/%s", target.Name, targetCopy.Name)
		obj, err = r.Client.RbacV1().Roles(target.Name).Create(context.TODO(), targetCopy, metav1.CreateOptions{})
	}
	if err != nil {
		return errors.Wrapf(err, "Failed to update role %s/%s", target.Name, targetCopy.Name)
	}

	if err := r.Store.Update(obj); err != nil {
		// Report the role's name, not the whole struct (the old %s verb on
		// targetCopy dumped the entire object into the error message).
		return errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, targetCopy.Name)
	}

	return nil
}

// PatchDeleteDependent clears the replicated rules from a dependent role via
// a JSON patch once its source is gone, returning the patched object.
func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) {
	dependentKey := common.MustGetKey(target)
	logger := log.WithFields(log.Fields{
		"kind":   r.Kind,
		"source": sourceKey,
		"target": dependentKey,
	})

	targetObject, ok := target.(*rbacv1.Role)
	if !ok {
		return nil, errors.Errorf("bad type returned from Store: %T", target)
	}

	patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/rules"}}
	patchBody, err := json.Marshal(&patch)
	if err != nil {
		// errors.Wrapf already appends err; the previous "%v"/err argument
		// pair duplicated it inside the message.
		return nil, errors.Wrapf(err, "error while building patch body for role %s", dependentKey)
	}

	logger.Debugf("clearing dependent role %s", dependentKey)
	logger.Tracef("patch body: %s", string(patchBody))

	s, err := r.Client.RbacV1().Roles(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "error while patching role %s", dependentKey)
	}
	return s, nil
}

// DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation
func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error {
targetLocation := common.MustGetKey(targetResource) 207 | logger := log.WithFields(log.Fields{ 208 | "kind": r.Kind, 209 | "target": targetLocation, 210 | }) 211 | 212 | object := targetResource.(*rbacv1.Role) 213 | logger.Debugf("Deleting %s", targetLocation) 214 | if err := r.Client.RbacV1().Roles(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil { 215 | return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err) 216 | } 217 | return nil 218 | } 219 | -------------------------------------------------------------------------------- /replicate/serviceaccount/serviceaccounts.go: -------------------------------------------------------------------------------- 1 | package serviceaccount 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/mittwald/kubernetes-replicator/replicate/common" 10 | "github.com/pkg/errors" 11 | log "github.com/sirupsen/logrus" 12 | v1 "k8s.io/api/core/v1" 13 | 14 | corev1 "k8s.io/api/core/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/runtime" 17 | "k8s.io/apimachinery/pkg/types" 18 | "k8s.io/apimachinery/pkg/watch" 19 | "k8s.io/client-go/kubernetes" 20 | ) 21 | 22 | type Replicator struct { 23 | *common.GenericReplicator 24 | } 25 | 26 | // NewReplicator creates a new serviceaccount replicator 27 | func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) common.Replicator { 28 | repl := Replicator{ 29 | GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{ 30 | Kind: "ServiceAccount", 31 | ObjType: &corev1.ServiceAccount{}, 32 | AllowAll: allowAll, 33 | ResyncPeriod: resyncPeriod, 34 | Client: client, 35 | ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { 36 | return client.CoreV1().ServiceAccounts("").List(context.TODO(), lo) 37 | }, 38 | WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { 39 | return 
client.CoreV1().ServiceAccounts("").Watch(context.TODO(), lo) 40 | }, 41 | }), 42 | } 43 | repl.UpdateFuncs = common.UpdateFuncs{ 44 | ReplicateDataFrom: repl.ReplicateDataFrom, 45 | ReplicateObjectTo: repl.ReplicateObjectTo, 46 | PatchDeleteDependent: repl.PatchDeleteDependent, 47 | DeleteReplicatedResource: repl.DeleteReplicatedResource, 48 | } 49 | 50 | return &repl 51 | } 52 | 53 | func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error { 54 | source := sourceObj.(*corev1.ServiceAccount) 55 | target := targetObj.(*corev1.ServiceAccount) 56 | 57 | logger := log. 58 | WithField("kind", r.Kind). 59 | WithField("source", common.MustGetKey(source)). 60 | WithField("target", common.MustGetKey(target)) 61 | 62 | // make sure replication is allowed 63 | if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok { 64 | return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source)) 65 | } 66 | 67 | targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation] 68 | sourceVersion := source.ResourceVersion 69 | 70 | if ok && targetVersion == sourceVersion { 71 | logger.Debugf("target %s/%s is already up-to-date", target.Namespace, target.Name) 72 | return nil 73 | } 74 | 75 | targetCopy := target.DeepCopy() 76 | targetCopy.ImagePullSecrets = source.ImagePullSecrets 77 | 78 | log.Infof("updating target %s/%s", target.Namespace, target.Name) 79 | 80 | targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 81 | targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 82 | 83 | s, err := r.Client.CoreV1().ServiceAccounts(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{}) 84 | if err != nil { 85 | err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name) 86 | } else if err = r.Store.Update(s); err != nil { 87 | err = errors.Wrapf(err, "Failed to 
update cache for %s/%s: %v", target.Namespace, targetCopy, err) 88 | } 89 | 90 | return err 91 | } 92 | 93 | // ReplicateObjectTo copies the whole object to target namespace 94 | func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error { 95 | source := sourceObj.(*corev1.ServiceAccount) 96 | targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name) 97 | 98 | logger := log. 99 | WithField("kind", r.Kind). 100 | WithField("source", common.MustGetKey(source)). 101 | WithField("target", targetLocation) 102 | 103 | targetResource, exists, err := r.Store.GetByKey(targetLocation) 104 | if err != nil { 105 | return errors.Wrapf(err, "Could not get %s from cache!", targetLocation) 106 | } 107 | logger.Infof("Checking if %s exists? %v", targetLocation, exists) 108 | 109 | var targetCopy *corev1.ServiceAccount 110 | if exists { 111 | targetObject := targetResource.(*corev1.ServiceAccount) 112 | targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation] 113 | sourceVersion := source.ResourceVersion 114 | 115 | if ok && targetVersion == sourceVersion { 116 | logger.Debugf("ServiceAccount %s is already up-to-date", common.MustGetKey(targetObject)) 117 | return nil 118 | } 119 | 120 | targetCopy = targetObject.DeepCopy() 121 | } else { 122 | targetCopy = new(corev1.ServiceAccount) 123 | } 124 | 125 | keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences] 126 | if ok && keepOwnerReferences == "true" { 127 | targetCopy.OwnerReferences = source.OwnerReferences 128 | } 129 | 130 | if targetCopy.Annotations == nil { 131 | targetCopy.Annotations = make(map[string]string) 132 | } 133 | 134 | labelsCopy := make(map[string]string) 135 | 136 | stripLabels, ok := source.Annotations[common.StripLabels] 137 | if !ok && stripLabels != "true" { 138 | if source.Labels != nil { 139 | for key, value := range source.Labels { 140 | labelsCopy[key] = value 141 | } 142 | } 143 | 144 | } 145 | 146 | targetCopy.Name = 
source.Name 147 | targetCopy.Labels = labelsCopy 148 | targetCopy.ImagePullSecrets = source.ImagePullSecrets 149 | targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 150 | targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 151 | 152 | var obj interface{} 153 | 154 | if exists { 155 | if err == nil { 156 | logger.Debugf("Updating existing serviceAccount %s/%s", target.Name, targetCopy.Name) 157 | obj, err = r.Client.CoreV1().ServiceAccounts(target.Name).Update(context.TODO(), targetCopy, metav1.UpdateOptions{}) 158 | } 159 | } else { 160 | if err == nil { 161 | logger.Debugf("Creating a new serviceAccount %s/%s", target.Name, targetCopy.Name) 162 | obj, err = r.Client.CoreV1().ServiceAccounts(target.Name).Create(context.TODO(), targetCopy, metav1.CreateOptions{}) 163 | } 164 | } 165 | if err != nil { 166 | return errors.Wrapf(err, "Failed to update serviceAccount %s/%s", target.Name, targetCopy.Name) 167 | } 168 | 169 | if err := r.Store.Update(obj); err != nil { 170 | return errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, targetCopy) 171 | } 172 | 173 | return nil 174 | } 175 | 176 | func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) { 177 | dependentKey := common.MustGetKey(target) 178 | logger := log.WithFields(log.Fields{ 179 | "kind": r.Kind, 180 | "source": sourceKey, 181 | "target": dependentKey, 182 | }) 183 | 184 | targetObject, ok := target.(*corev1.ServiceAccount) 185 | if !ok { 186 | err := errors.Errorf("bad type returned from Store: %T", target) 187 | return nil, err 188 | } 189 | 190 | patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/imagePullSecrets"}} 191 | patchBody, err := json.Marshal(&patch) 192 | 193 | if err != nil { 194 | return nil, errors.Wrapf(err, "error while building patch body for serviceAccount %s: %v", dependentKey, err) 195 | 196 | } 197 | 198 | logger.Debugf("clearing 
dependent serviceAccount %s", dependentKey) 199 | logger.Tracef("patch body: %s", string(patchBody)) 200 | 201 | s, err := r.Client.CoreV1().ServiceAccounts(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}) 202 | if err != nil { 203 | return nil, errors.Wrapf(err, "error while patching serviceAccount %s: %v", dependentKey, err) 204 | } 205 | return s, nil 206 | } 207 | 208 | // DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation 209 | func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error { 210 | targetLocation := common.MustGetKey(targetResource) 211 | logger := log.WithFields(log.Fields{ 212 | "kind": r.Kind, 213 | "target": targetLocation, 214 | }) 215 | 216 | object := targetResource.(*corev1.ServiceAccount) 217 | logger.Debugf("Deleting %s", targetLocation) 218 | if err := r.Client.CoreV1().ServiceAccounts(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil { 219 | return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err) 220 | } 221 | return nil 222 | } 223 | -------------------------------------------------------------------------------- /replicate/rolebinding/rolebindings.go: -------------------------------------------------------------------------------- 1 | package rolebinding 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "time" 8 | 9 | "github.com/mittwald/kubernetes-replicator/replicate/common" 10 | "github.com/pkg/errors" 11 | log "github.com/sirupsen/logrus" 12 | v1 "k8s.io/api/core/v1" 13 | 14 | rbacv1 "k8s.io/api/rbac/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/runtime" 17 | "k8s.io/apimachinery/pkg/types" 18 | "k8s.io/apimachinery/pkg/watch" 19 | "k8s.io/client-go/kubernetes" 20 | ) 21 | 22 | type Replicator struct { 23 | *common.GenericReplicator 24 | } 25 | 26 | const sleepTime = 100 * time.Millisecond 27 | 28 | // 
NewReplicator creates a new RoleBinding replicator.
func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll bool) common.Replicator {
	repl := Replicator{
		GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{
			Kind:         "RoleBinding",
			ObjType:      &rbacv1.RoleBinding{},
			AllowAll:     allowAll,
			ResyncPeriod: resyncPeriod,
			Client:       client,
			ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) {
				return client.RbacV1().RoleBindings("").List(context.TODO(), lo)
			},
			WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) {
				return client.RbacV1().RoleBindings("").Watch(context.TODO(), lo)
			},
		}),
	}
	repl.UpdateFuncs = common.UpdateFuncs{
		ReplicateDataFrom:        repl.ReplicateDataFrom,
		ReplicateObjectTo:        repl.ReplicateObjectTo,
		PatchDeleteDependent:     repl.PatchDeleteDependent,
		DeleteReplicatedResource: repl.DeleteReplicatedResource,
	}

	return &repl
}

// ReplicateDataFrom copies the replicated payload (subjects) from the source
// RoleBinding into an existing target RoleBinding, provided the source's
// annotations permit replication into the target's namespace. The target is
// only updated when the recorded source resource version differs.
func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error {
	source := sourceObj.(*rbacv1.RoleBinding)
	target := targetObj.(*rbacv1.RoleBinding)

	logger := log.
		WithField("kind", r.Kind).
		WithField("source", common.MustGetKey(source)).
		WithField("target", common.MustGetKey(target))

	// Make sure replication is allowed by the source's annotations.
	if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok {
		return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source))
	}

	// Skip the update when the target already mirrors this source version.
	targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation]
	sourceVersion := source.ResourceVersion
	if ok && targetVersion == sourceVersion {
		logger.Debugf("target %s/%s is already up-to-date", target.Namespace, target.Name)
		return nil
	}

	targetCopy := target.DeepCopy()
	targetCopy.Subjects = source.Subjects

	logger.Infof("updating target %s/%s", target.Namespace, target.Name)

	// DeepCopy preserves a nil map; initialize before writing annotations to
	// avoid a panic when the target carried none.
	if targetCopy.Annotations == nil {
		targetCopy.Annotations = make(map[string]string)
	}
	targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
	targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion

	s, err := r.Client.RbacV1().RoleBindings(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{})
	if err != nil {
		err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name)
	} else if err = r.Store.Update(s); err != nil {
		// BUG FIX: the original formatted the struct pointer targetCopy with
		// %s and redundantly appended %v err to an already-wrapping Wrapf.
		err = errors.Wrapf(err, "Failed to update cache for %s/%s", target.Namespace, targetCopy.Name)
	}

	return err
}

// ReplicateObjectTo copies the whole RoleBinding into the target namespace,
// creating it when absent and updating it when the source version changed.
// When the binding references a namespaced Role, the Role must exist in the
// target namespace first (see canReplicate).
func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error {
	source := sourceObj.(*rbacv1.RoleBinding)
	targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name)

	logger := log.
		WithField("kind", r.Kind).
		WithField("source", common.MustGetKey(source)).
		WithField("target", targetLocation)

	targetResource, exists, err := r.Store.GetByKey(targetLocation)
	if err != nil {
		return errors.Wrapf(err, "Could not get %s from cache!", targetLocation)
	}
	logger.Infof("Checking if %s exists? %v", targetLocation, exists)

	var targetCopy *rbacv1.RoleBinding
	if exists {
		targetObject := targetResource.(*rbacv1.RoleBinding)
		targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation]
		if ok && targetVersion == source.ResourceVersion {
			logger.Debugf("RoleBinding %s is already up-to-date", common.MustGetKey(targetObject))
			return nil
		}
		targetCopy = targetObject.DeepCopy()
	} else {
		targetCopy = new(rbacv1.RoleBinding)
	}

	// Owner references are only kept on explicit request; they do not work
	// across namespaces and would get the replica garbage-collected.
	if keep, ok := source.Annotations[common.KeepOwnerReferences]; ok && keep == "true" {
		targetCopy.OwnerReferences = source.OwnerReferences
	}

	if targetCopy.Annotations == nil {
		targetCopy.Annotations = make(map[string]string)
	}

	// Copy the source labels unless the strip-labels annotation is explicitly
	// "true". BUG FIX: the original condition (`!ok && stripLabels != "true"`)
	// skipped the copy whenever the annotation was present at all, even with a
	// value such as "false".
	labelsCopy := make(map[string]string)
	if stripLabels, ok := source.Annotations[common.StripLabels]; !ok || stripLabels != "true" {
		for key, value := range source.Labels {
			labelsCopy[key] = value
		}
	}

	targetCopy.Name = source.Name
	targetCopy.Labels = labelsCopy
	targetCopy.Subjects = source.Subjects
	targetCopy.RoleRef = source.RoleRef
	targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339)
	targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion

	// A namespaced RoleRef must exist in the target namespace before the
	// binding can be written there.
	if targetCopy.RoleRef.Kind == "Role" {
		if err := r.canReplicate(target.Name, targetCopy.RoleRef.Name); err != nil {
			return errors.Wrapf(err, "Failed to update roleBinding %s/%s", target.Name, targetCopy.Name)
		}
	}

	var obj interface{}
	if exists {
		logger.Debugf("Updating existing roleBinding %s/%s", target.Name, targetCopy.Name)
		obj, err = r.Client.RbacV1().RoleBindings(target.Name).Update(context.TODO(), targetCopy, metav1.UpdateOptions{})
	} else {
		logger.Debugf("Creating a new roleBinding %s/%s", target.Name, targetCopy.Name)
		obj, err = r.Client.RbacV1().RoleBindings(target.Name).Create(context.TODO(), targetCopy, metav1.CreateOptions{})
	}
	if err != nil {
		return errors.Wrapf(err, "Failed to update roleBinding %s/%s", target.Name, targetCopy.Name)
	}

	if err := r.Store.Update(obj); err != nil {
		return errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, targetCopy.Name)
	}

	return nil
}

// canReplicate checks whether the Role required by a RoleBinding exists in the
// target namespace. It retries a few times before returning the last error so
// that concurrent Role replication can catch up.
func (r *Replicator) canReplicate(targetNamespace string, roleRef string) (err error) {
	for i := 0; i < 5; i++ {
		_, err = r.Client.RbacV1().Roles(targetNamespace).Get(context.TODO(), roleRef, metav1.GetOptions{})
		if err == nil {
			return nil
		}
		time.Sleep(sleepTime)
	}
	return err
}

// PatchDeleteDependent clears the replicated payload from a dependent
// RoleBinding by removing its subjects via a JSON patch.
// NOTE(review): per RFC 6902 a "remove" on a missing path fails; this assumes
// the dependent still carries subjects — confirm for the edge case of an
// already-cleared target.
func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) {
	dependentKey := common.MustGetKey(target)
	logger := log.WithFields(log.Fields{
		"kind":   r.Kind,
		"source": sourceKey,
		"target": dependentKey,
	})

	targetObject, ok := target.(*rbacv1.RoleBinding)
	if !ok {
		return nil, errors.Errorf("bad type returned from Store: %T", target)
	}

	patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/subjects"}}
	patchBody, err := json.Marshal(&patch)
	if err != nil {
		return nil, errors.Wrapf(err, "error while building patch body for roleBinding %s", dependentKey)
	}

	logger.Debugf("clearing dependent roleBinding %s", dependentKey)
	logger.Tracef("patch body: %s", string(patchBody))

	// BUG FIX: the original error message said "patching role" although this
	// patches a RoleBinding.
	s, err := r.Client.RbacV1().RoleBindings(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{})
	if err != nil {
		return nil, errors.Wrapf(err, "error while patching roleBinding %s", dependentKey)
	}
	return s, nil
}

// DeleteReplicatedResource deletes a RoleBinding that was replicated by a
// ReplicateTo annotation.
func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error {
	targetLocation := common.MustGetKey(targetResource)
	logger := log.WithFields(log.Fields{
		"kind":   r.Kind,
		"target": targetLocation,
	})

	object := targetResource.(*rbacv1.RoleBinding)
	logger.Debugf("Deleting %s", targetLocation)
	if err := r.Client.RbacV1().RoleBindings(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil {
		return errors.Wrapf(err, "Failed deleting %s", targetLocation)
	}
	return nil
}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ConfigMap, Secret and Role, RoleBinding and ServiceAccount replication for Kubernetes

![Build Status](https://github.com/mittwald/kubernetes-replicator/workflows/Compile%20&%20Test/badge.svg)

This repository contains a custom Kubernetes controller that can be used to make
secrets and config maps available in multiple namespaces.

## Contents

1. [Deployment](#deployment)
   1. [Using Helm](#using-helm)
   1. [Manual](#manual)
1. [Usage](#usage)
   1. [Role and RoleBinding replication](#role-and-rolebinding-replication)
   1. 
["Push-based" replication](#push-based-replication) 16 | 1. ["Pull-based" replication](#pull-based-replication) 17 | 1. [1. Create the source secret](#step-1-create-the-source-secret) 18 | 1. [2. Create empty secret](#step-2-create-an-empty-destination-secret) 19 | 1. [Special case: TLS secrets](#special-case-tls-secrets) 20 | 21 | ## Deployment 22 | 23 | ### Using Helm 24 | 25 | 1. Add the Mittwald Helm Repo: 26 | ```shellsession 27 | $ helm repo add mittwald https://helm.mittwald.de 28 | "mittwald" has been added to your repositories 29 | 30 | $ helm repo update 31 | Hang tight while we grab the latest from your chart repositories... 32 | ...Successfully got an update from the "mittwald" chart repository 33 | Update Complete. ⎈ Happy Helming!⎈ 34 | ``` 35 | 36 | 2. Upgrade or install `kubernetes-replicator` 37 | `helm upgrade --install kubernetes-replicator mittwald/kubernetes-replicator` 38 | 39 | ### Manual 40 | 41 | ```shellsession 42 | $ # Create roles and service accounts 43 | $ kubectl apply -f https://raw.githubusercontent.com/mittwald/kubernetes-replicator/master/deploy/rbac.yaml 44 | $ # Create actual deployment 45 | $ kubectl apply -f https://raw.githubusercontent.com/mittwald/kubernetes-replicator/master/deploy/deployment.yaml 46 | ``` 47 | 48 | ## Usage 49 | 50 | ### Role and RoleBinding replication 51 | 52 | To create a new role, your own account needs to have at least the same set of privileges as the role you're trying to create. The chart currently offers two options to grant these permissions to the service account used by the replicator: 53 | 54 | - Set the value `grantClusterAdmin`to `true`, which grants the service account admin privileges. This is set to `false` by default, as having a service account with that level of access might be undesirable due to the potential security risks attached. 55 | 56 | - Set the lists of needed api groups and resources explicitly. These can be specified using the value `privileges`. 
`privileges` is a list that contains pairs of api group and resource lists. 57 | 58 | Example: 59 | 60 | ```yaml 61 | serviceAccount: 62 | create: true 63 | annotations: {} 64 | name: 65 | privileges: 66 | - apiGroups: [ "", "apps", "extensions" ] 67 | resources: ["secrets", "configmaps", "roles", "rolebindings", 68 | "cronjobs", "deployments", "events", "ingresses", "jobs", "pods", "pods/attach", "pods/exec", "pods/log", "pods/portforward", "services"] 69 | - apiGroups: [ "batch" ] 70 | resources: ["configmaps", "cronjobs", "deployments", "events", "ingresses", "jobs", "pods", "pods/attach", "pods/exec", "pods/log", "pods/portforward", "services"] 71 | ``` 72 | 73 | These settings permit the replication of Roles and RoleBindings with privileges for the api groups `""`. `apps`, `batch` and `extensions` on the resources specified. 74 | 75 | ### "Push-based" replication 76 | 77 | Push-based replication will "push out" the secrets, configmaps, roles and rolebindings into namespaces when new namespaces are created or when the secret/configmap/roles/rolebindings changes. 78 | 79 | There are two general methods for push-based replication: 80 | 81 | - name-based; this allows you to either specify your target namespaces _by name_ or by regular expression (which should match the namespace name). To use name-based push replication, add a `replicator.v1.mittwald.de/replicate-to` annotation to your secret, role(binding) or configmap. The value of this annotation should contain a comma separated list of permitted namespaces or regular expressions. (Example: `namespace-1,my-ns-2,app-ns-[0-9]*` will replicate only into the namespaces `namespace-1` and `my-ns-2` as well as any namespace that matches the regular expression `app-ns-[0-9]*`). 
82 | 83 | Example: 84 | 85 | ```yaml 86 | apiVersion: v1 87 | kind: Secret 88 | metadata: 89 | name: test-secret 90 | annotations: 91 | replicator.v1.mittwald.de/replicate-to: "my-ns-1,namespace-[0-9]*" 92 | data: 93 | key1: 94 | ``` 95 | 96 | - label-based; this allows you to specify a label selector that a namespace should match in order for a secret, role(binding) or configmap to be replicated. To use label-based push replication, add a `replicator.v1.mittwald.de/replicate-to-matching` annotation to the object you want to replicate. The value of this annotation should contain an arbitrary [label selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors). 97 | 98 | Example: 99 | 100 | ```yaml 101 | apiVersion: v1 102 | kind: Secret 103 | metadata: 104 | name: test-secret 105 | annotations: 106 | replicator.v1.mittwald.de/replicate-to-matching: > 107 | my-label=value,my-other-label,my-other-label notin (foo,bar) 108 | data: 109 | key1: 110 | ``` 111 | 112 | When the labels of a namespace are changed, any resources that were replicated by labels into the namespace and no longer qualify for replication under the new set of labels will be deleted. Afterwards any resources that now match the updated labels will be replicated into the namespace. 113 | 114 | It is possible to use both methods of push-based replication together in a single resource, by specifying both annotations. 115 | 116 | ### "Pull-based" replication 117 | 118 | Pull-based replication makes it possible to create a secret/configmap/role/rolebindings and select a "source" resource 119 | from which the data is replicated from. 120 | 121 | #### Step 1: Create the source secret 122 | 123 | If a secret or configMap needs to be replicated to other namespaces, annotations should be added in that object 124 | permitting replication. 
125 | 126 | - Add `replicator.v1.mittwald.de/replication-allowed` annotation with value `true` indicating that the object can be 127 | replicated. 128 | - Add `replicator.v1.mittwald.de/replication-allowed-namespaces` annotation. Value of this annotation should contain 129 | a comma separated list of permitted namespaces or regular expressions. For example `namespace-1,my-ns-2,app-ns-[0-9]*`: 130 | in this case replication will be performed only into the namespaces `namespace-1` and `my-ns-2` as well as any 131 | namespace that matches the regular expression `app-ns-[0-9]*`. 132 | 133 | ```yaml 134 | apiVersion: v1 135 | kind: Secret 136 | metadata: 137 | name: test-secret 138 | annotations: 139 | replicator.v1.mittwald.de/replication-allowed: "true" 140 | replicator.v1.mittwald.de/replication-allowed-namespaces: "my-ns-1,namespace-[0-9]*" 141 | data: 142 | key1: 143 | ``` 144 | 145 | #### Step 2: Create an empty destination secret 146 | 147 | Add the annotation `replicator.v1.mittwald.de/replicate-from` to any Kubernetes secret or config map object. The value 148 | of that annotation should contain the the name of another secret or config map (using `/` notation). 149 | 150 | ```yaml 151 | apiVersion: v1 152 | kind: Secret 153 | metadata: 154 | name: secret-replica 155 | annotations: 156 | replicator.v1.mittwald.de/replicate-from: default/some-secret 157 | data: {} 158 | ``` 159 | 160 | The replicator will then copy the `data` attribute of the referenced object into the annotated object and keep them in 161 | sync. 162 | 163 | By default, the replicator adds an annotation `replicator.v1.mittwald.de/replicated-from-version` to the target object. 164 | This annotation contains the resource-version of the source object at the time of replication. 165 | 166 | ##### Sync by Content 167 | 168 | When the target object is re-applied with an empty `data` attribute, the replicator will not automatically perform replication. 
169 | The reason is that the target already has the `replicated-from-version` annotation with a matching source resource-version. 170 | For Secrets and ConfigMaps, there is the option to synchronize _based on the content_, ignoring the `replicated-from-version` annotation. 171 | 172 | To activate this mode, start the replicator with the `--sync-by-content` flag. 173 | 174 | #### Special case: TLS secrets 175 | 176 | Secrets of type `kubernetes.io/tls` are treated in a special way and need to have a `data["tls.crt"]` and a 177 | `data["tls.key"]` property to begin with. In the replicated secrets, these properties need to be present to begin with, 178 | but they may be empty: 179 | 180 | ```yaml 181 | apiVersion: v1 182 | kind: Secret 183 | metadata: 184 | name: tls-secret-replica 185 | annotations: 186 | replicator.v1.mittwald.de/replicate-from: default/some-tls-secret 187 | type: kubernetes.io/tls 188 | data: 189 | tls.key: "" 190 | tls.crt: "" 191 | ``` 192 | 193 | #### Special case: Docker registry credentials 194 | 195 | Secrets of type `kubernetes.io/dockerconfigjson` also require special treatment. These secrets require to have a 196 | `.dockerconfigjson` key that needs to require valid JSON. For this reason, a replicated secret of this type should be 197 | created as follows: 198 | 199 | ```yaml 200 | apiVersion: v1 201 | kind: Secret 202 | metadata: 203 | name: docker-secret-replica 204 | annotations: 205 | replicator.v1.mittwald.de/replicate-from: default/some-docker-secret 206 | type: kubernetes.io/dockerconfigjson 207 | data: 208 | .dockerconfigjson: e30K 209 | ``` 210 | 211 | #### Special case: Strip labels while replicate the resources. 212 | 213 | Operators like [https://github.com/strimzi/strimzi-kafka-operator](strimzi-kafka-operator) implement an own garbage collection based on specific labels defined on resources. 
If mittwald replicator replicate secrets to different namespace, the strimzi-kafka-operator will remove the replicated secrets because from operators point of view the secret is a left-over. To mitigate the issue, set the annotation `replicator.v1.mittwald.de/strip-labels=true` to remove all labels on the replicated resource. 214 | 215 | ```yaml 216 | apiVersion: v1 217 | kind: Secret 218 | metadata: 219 | labels: 220 | app.kubernetes.io/managed-by: "strimzi-kafka-operator" 221 | name: cluster-ca-certs 222 | annotations: 223 | replicator.v1.mittwald.de/strip-labels: "true" 224 | type: kubernetes.io/tls 225 | data: 226 | tls.key: "" 227 | tls.crt: "" 228 | ``` 229 | 230 | #### Special case: Resource with .metadata.ownerReferences 231 | 232 | Sometimes, secrets are generated by external components. Such secrets are configured with an ownerReference. By default, the kubernetes-replicator will delete the 233 | ownerReference in the target namespace. 234 | 235 | ownerReference won't work [across different namespaces](https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/#owners-and-dependents) and the secret at the destination will be removed by the kubernetes garbage collection. 
236 | 237 | To keep `ownerReferences` at the destination, set the annotation `replicator.v1.mittwald.de/keep-owner-references=true` 238 | 239 | ```yaml 240 | apiVersion: v1 241 | kind: Secret 242 | metadata: 243 | name: docker-secret-replica 244 | annotations: 245 | replicator.v1.mittwald.de/keep-owner-references: "true" 246 | ownerReferences: 247 | - apiVersion: v1 248 | kind: Deployment 249 | name: owner 250 | uid: "1234" 251 | type: kubernetes.io/tls 252 | data: 253 | tls.key: "" 254 | tls.crt: "" 255 | ``` 256 | 257 | See also: https://github.com/mittwald/kubernetes-replicator/issues/120 258 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /replicate/secret/secrets.go: -------------------------------------------------------------------------------- 1 | package secret 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "sort" 9 | "strings" 10 | "time" 11 | 12 | "github.com/mittwald/kubernetes-replicator/replicate/common" 13 | "github.com/pkg/errors" 14 | log "github.com/sirupsen/logrus" 15 | "k8s.io/apimachinery/pkg/types" 16 | 17 | v1 "k8s.io/api/core/v1" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | "k8s.io/apimachinery/pkg/runtime" 20 | "k8s.io/apimachinery/pkg/watch" 21 | "k8s.io/client-go/kubernetes" 22 | ) 23 | 24 | type Replicator struct { 25 | *common.GenericReplicator 26 | } 27 | 28 | // NewReplicator creates a new secret replicator 29 | func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll, syncByContent bool, namespaceFilter *common.NamespaceFilter) common.Replicator { 30 | repl := 
Replicator{ 31 | GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{ 32 | Kind: "Secret", 33 | ObjType: &v1.Secret{}, 34 | AllowAll: allowAll, 35 | SyncByContent: syncByContent, 36 | ResyncPeriod: resyncPeriod, 37 | Client: client, 38 | ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { 39 | return client.CoreV1().Secrets("").List(context.TODO(), lo) 40 | }, 41 | WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { 42 | return client.CoreV1().Secrets("").Watch(context.TODO(), lo) 43 | }, 44 | NamespaceFilter: namespaceFilter, 45 | }), 46 | } 47 | repl.UpdateFuncs = common.UpdateFuncs{ 48 | ReplicateDataFrom: repl.ReplicateDataFrom, 49 | ReplicateObjectTo: repl.ReplicateObjectTo, 50 | PatchDeleteDependent: repl.PatchDeleteDependent, 51 | DeleteReplicatedResource: repl.DeleteReplicatedResource, 52 | } 53 | 54 | return &repl 55 | } 56 | 57 | // ReplicateDataFrom takes a source object and copies over data to target object 58 | func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error { 59 | source := sourceObj.(*v1.Secret) 60 | target := targetObj.(*v1.Secret) 61 | 62 | // make sure replication is allowed 63 | logger := log. 64 | WithField("kind", r.Kind). 65 | WithField("source", common.MustGetKey(source)). 
66 | WithField("target", common.MustGetKey(target)) 67 | 68 | if ok, err := r.IsReplicationPermitted(&target.ObjectMeta, &source.ObjectMeta); !ok { 69 | return errors.Wrapf(err, "replication of target %s is not permitted", common.MustGetKey(source)) 70 | } 71 | 72 | targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation] 73 | sourceVersion := source.ResourceVersion 74 | 75 | if ok && targetVersion == sourceVersion && !r.SyncByContent { 76 | logger.Debugf("target %s is already up-to-date", common.MustGetKey(target)) 77 | return nil 78 | } 79 | 80 | targetCopy := target.DeepCopy() 81 | if targetCopy.Data == nil { 82 | targetCopy.Data = make(map[string][]byte) 83 | } 84 | 85 | prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&targetCopy.ObjectMeta) 86 | replicatedKeys := make([]string, 0) 87 | 88 | dataChanged := false 89 | for key, value := range source.Data { 90 | newValue := make([]byte, len(value)) 91 | copy(newValue, value) 92 | oldValue, ok := targetCopy.Data[key] 93 | if ok { 94 | if bytes.Compare(newValue, oldValue) != 0 { 95 | dataChanged = true 96 | } 97 | } else { 98 | dataChanged = true 99 | } 100 | targetCopy.Data[key] = newValue 101 | 102 | replicatedKeys = append(replicatedKeys, key) 103 | delete(prevKeys, key) 104 | } 105 | 106 | if hasPrevKeys { 107 | for k := range prevKeys { 108 | logger.Debugf("removing previously present key %s: not present in source any more", k) 109 | delete(targetCopy.Data, k) 110 | dataChanged = true 111 | } 112 | } 113 | 114 | if !dataChanged { 115 | logger.Debugf("target values of %s are already up-to-date", common.MustGetKey(target)) 116 | return nil 117 | } 118 | 119 | sort.Strings(replicatedKeys) 120 | 121 | logger.Infof("updating target %s", common.MustGetKey(target)) 122 | 123 | targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 124 | targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 125 | 
targetCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",") 126 | 127 | s, err := r.Client.CoreV1().Secrets(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{}) 128 | if err != nil { 129 | err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name) 130 | } else if err = r.Store.Update(s); err != nil { 131 | err = errors.Wrapf(err, "Failed to update cache for %s/%s: %v", target.Namespace, targetCopy, err) 132 | } 133 | return err 134 | } 135 | 136 | // ReplicateObjectTo copies the whole object to target namespace 137 | func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error { 138 | source := sourceObj.(*v1.Secret) 139 | targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name) 140 | 141 | logger := log. 142 | WithField("kind", r.Kind). 143 | WithField("source", common.MustGetKey(source)). 144 | WithField("target", targetLocation) 145 | 146 | targetResourceType := source.Type 147 | targetResource, exists, err := r.Store.GetByKey(targetLocation) 148 | if err != nil { 149 | return errors.Wrapf(err, "Could not get %s from cache!", targetLocation) 150 | } 151 | logger.Infof("Checking if %s exists? 
%v", targetLocation, exists) 152 | 153 | var resourceCopy *v1.Secret 154 | if exists { 155 | targetObject := targetResource.(*v1.Secret) 156 | targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation] 157 | sourceVersion := source.ResourceVersion 158 | 159 | if ok && targetVersion == sourceVersion { 160 | logger.Debugf("Secret %s is already up-to-date", common.MustGetKey(targetObject)) 161 | return nil 162 | } 163 | 164 | targetResourceType = targetObject.Type 165 | resourceCopy = targetObject.DeepCopy() 166 | } else { 167 | resourceCopy = new(v1.Secret) 168 | } 169 | 170 | keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences] 171 | if ok && keepOwnerReferences == "true" { 172 | resourceCopy.OwnerReferences = source.OwnerReferences 173 | } 174 | 175 | if resourceCopy.Data == nil { 176 | resourceCopy.Data = make(map[string][]byte) 177 | } 178 | if resourceCopy.Annotations == nil { 179 | resourceCopy.Annotations = make(map[string]string) 180 | } 181 | 182 | replicatedKeys := r.extractReplicatedKeys(source, targetLocation, resourceCopy) 183 | 184 | sort.Strings(replicatedKeys) 185 | 186 | labelsCopy := make(map[string]string) 187 | 188 | stripLabels, ok := source.Annotations[common.StripLabels] 189 | if !ok && stripLabels != "true" { 190 | if source.Labels != nil { 191 | for key, value := range source.Labels { 192 | labelsCopy[key] = value 193 | } 194 | } 195 | } 196 | 197 | resourceCopy.Name = source.Name 198 | resourceCopy.Labels = labelsCopy 199 | resourceCopy.Type = targetResourceType 200 | resourceCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 201 | resourceCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 202 | resourceCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",") 203 | 204 | var obj interface{} 205 | if exists { 206 | logger.Debugf("Updating existing secret %s/%s", target.Name, resourceCopy.Name) 207 | obj, 
err = r.Client.CoreV1().Secrets(target.Name).Update(context.TODO(), resourceCopy, metav1.UpdateOptions{}) 208 | } else { 209 | logger.Debugf("Creating a new secret secret %s/%s", target.Name, resourceCopy.Name) 210 | obj, err = r.Client.CoreV1().Secrets(target.Name).Create(context.TODO(), resourceCopy, metav1.CreateOptions{}) 211 | } 212 | if err != nil { 213 | err = errors.Wrapf(err, "Failed to update secret %s/%s", target.Name, resourceCopy.Name) 214 | } else if err = r.Store.Update(obj); err != nil { 215 | err = errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, resourceCopy) 216 | } 217 | 218 | return err 219 | } 220 | 221 | func (r *Replicator) extractReplicatedKeys(source *v1.Secret, targetLocation string, resourceCopy *v1.Secret) []string { 222 | logger := log. 223 | WithField("kind", r.Kind). 224 | WithField("source", common.MustGetKey(source)). 225 | WithField("target", targetLocation) 226 | 227 | prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&resourceCopy.ObjectMeta) 228 | replicatedKeys := make([]string, 0) 229 | 230 | for key, value := range source.Data { 231 | newValue := make([]byte, len(value)) 232 | copy(newValue, value) 233 | resourceCopy.Data[key] = newValue 234 | 235 | replicatedKeys = append(replicatedKeys, key) 236 | delete(prevKeys, key) 237 | } 238 | 239 | if hasPrevKeys { 240 | for k := range prevKeys { 241 | logger.Debugf("removing previously present key %s: not present in source secret any more", k) 242 | delete(resourceCopy.Data, k) 243 | } 244 | } 245 | return replicatedKeys 246 | } 247 | 248 | func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) { 249 | dependentKey := common.MustGetKey(target) 250 | logger := log.WithFields(log.Fields{ 251 | "kind": r.Kind, 252 | "source": sourceKey, 253 | "target": dependentKey, 254 | }) 255 | 256 | targetObject, ok := target.(*v1.Secret) 257 | if !ok { 258 | err := errors.Errorf("bad type returned from Store: %T", target) 259 | 
return nil, err 260 | } 261 | 262 | patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/data"}} 263 | patchBody, err := json.Marshal(&patch) 264 | 265 | if err != nil { 266 | return nil, errors.Wrapf(err, "error while building patch body for secret %s: %v", dependentKey, err) 267 | } 268 | 269 | logger.Debugf("clearing dependent %s %s", r.Kind, dependentKey) 270 | logger.Tracef("patch body: %s", string(patchBody)) 271 | 272 | s, err := r.Client.CoreV1().Secrets(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}) 273 | if err != nil { 274 | return nil, errors.Wrapf(err, "error while patching secret %s: %v", dependentKey, err) 275 | } 276 | return s, nil 277 | } 278 | 279 | // DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation 280 | func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error { 281 | targetLocation := common.MustGetKey(targetResource) 282 | logger := log.WithFields(log.Fields{ 283 | "kind": r.Kind, 284 | "target": targetLocation, 285 | }) 286 | 287 | object := targetResource.(*v1.Secret) 288 | resourceKeys := strings.Join(common.GetKeysFromBinaryMap(object.Data), ",") 289 | if resourceKeys == object.Annotations[common.ReplicatedKeysAnnotation] { 290 | logger.Debugf("Deleting %s", targetLocation) 291 | if err := r.Client.CoreV1().Secrets(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil { 292 | return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err) 293 | } 294 | } else { 295 | var patch []common.JSONPatchOperation 296 | exists := make(map[string]struct{}) 297 | for _, value := range common.GetKeysFromBinaryMap(object.Data) { 298 | exists[value] = struct{}{} 299 | } 300 | for _, val := range strings.Split(object.Annotations[common.ReplicatedKeysAnnotation], ",") { 301 | if _, ok := exists[val]; ok { 302 | patch = append(patch, common.JSONPatchOperation{Operation: 
"remove", Path: fmt.Sprintf("/data/%s", val)}) 303 | } 304 | } 305 | patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/metadata/annotations/%s", common.JSONPatchPathEscape(common.ReplicatedKeysAnnotation))}) 306 | 307 | patchBody, err := json.Marshal(&patch) 308 | if err != nil { 309 | return errors.Wrapf(err, "error while building patch body for confimap %s: %v", object, err) 310 | } 311 | 312 | s, err := r.Client.CoreV1().Secrets(object.Namespace).Patch(context.TODO(), object.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}) 313 | if err != nil { 314 | return errors.Wrapf(err, "error while patching secret %s: %v", s, err) 315 | 316 | } 317 | 318 | logger.Debugf("Not deleting %s since it contains other keys then replicated.", targetLocation) 319 | } 320 | 321 | return nil 322 | } 323 | -------------------------------------------------------------------------------- /replicate/configmap/configmaps.go: -------------------------------------------------------------------------------- 1 | package configmap 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "sort" 9 | "strings" 10 | "time" 11 | 12 | "github.com/mittwald/kubernetes-replicator/replicate/common" 13 | "github.com/pkg/errors" 14 | log "github.com/sirupsen/logrus" 15 | 16 | v1 "k8s.io/api/core/v1" 17 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 18 | "k8s.io/apimachinery/pkg/runtime" 19 | "k8s.io/apimachinery/pkg/types" 20 | "k8s.io/apimachinery/pkg/watch" 21 | "k8s.io/client-go/kubernetes" 22 | ) 23 | 24 | type Replicator struct { 25 | *common.GenericReplicator 26 | } 27 | 28 | // NewReplicator creates a new config map replicator 29 | func NewReplicator(client kubernetes.Interface, resyncPeriod time.Duration, allowAll, syncByContent bool, namespaceFilter *common.NamespaceFilter) common.Replicator { 30 | repl := Replicator{ 31 | GenericReplicator: common.NewGenericReplicator(common.ReplicatorConfig{ 32 | Kind: "ConfigMap", 33 | 
ObjType: &v1.ConfigMap{}, 34 | AllowAll: allowAll, 35 | SyncByContent: syncByContent, 36 | ResyncPeriod: resyncPeriod, 37 | Client: client, 38 | ListFunc: func(lo metav1.ListOptions) (runtime.Object, error) { 39 | return client.CoreV1().ConfigMaps("").List(context.TODO(), lo) 40 | }, 41 | WatchFunc: func(lo metav1.ListOptions) (watch.Interface, error) { 42 | return client.CoreV1().ConfigMaps("").Watch(context.TODO(), lo) 43 | }, 44 | NamespaceFilter: namespaceFilter, 45 | }), 46 | } 47 | repl.UpdateFuncs = common.UpdateFuncs{ 48 | ReplicateDataFrom: repl.ReplicateDataFrom, 49 | ReplicateObjectTo: repl.ReplicateObjectTo, 50 | PatchDeleteDependent: repl.PatchDeleteDependent, 51 | DeleteReplicatedResource: repl.DeleteReplicatedResource, 52 | } 53 | 54 | return &repl 55 | } 56 | 57 | // ReplicateDataFrom takes a source object and copies over data to target object 58 | func (r *Replicator) ReplicateDataFrom(sourceObj interface{}, targetObj interface{}) error { 59 | source := sourceObj.(*v1.ConfigMap) 60 | target := targetObj.(*v1.ConfigMap) 61 | 62 | // make sure replication is allowed 63 | logger := log. 64 | WithField("kind", r.Kind). 65 | WithField("source", common.MustGetKey(source)). 
66 | WithField("target", common.MustGetKey(target)) 67 | 68 | targetVersion, ok := target.Annotations[common.ReplicatedFromVersionAnnotation] 69 | sourceVersion := source.ResourceVersion 70 | 71 | if ok && targetVersion == sourceVersion && !r.SyncByContent { 72 | logger.Debugf("target %s is already up-to-date", common.MustGetKey(target)) 73 | return nil 74 | } 75 | 76 | targetCopy := target.DeepCopy() 77 | if targetCopy.Data == nil { 78 | targetCopy.Data = make(map[string]string) 79 | } 80 | 81 | prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&targetCopy.ObjectMeta) 82 | replicatedKeys := make([]string, 0) 83 | 84 | dataChanged := false 85 | for key, value := range source.Data { 86 | oldValue, ok := targetCopy.Data[key] 87 | if ok { 88 | if strings.Compare(value, oldValue) != 0 { 89 | dataChanged = true 90 | } 91 | } else { 92 | dataChanged = true 93 | } 94 | targetCopy.Data[key] = value 95 | 96 | replicatedKeys = append(replicatedKeys, key) 97 | delete(prevKeys, key) 98 | } 99 | 100 | if source.BinaryData != nil { 101 | if targetCopy.BinaryData == nil { 102 | targetCopy.BinaryData = make(map[string][]byte) 103 | } 104 | for key, value := range source.BinaryData { 105 | newValue := make([]byte, len(value)) 106 | copy(newValue, value) 107 | oldValue, ok := targetCopy.BinaryData[key] 108 | if ok { 109 | if bytes.Compare(newValue, oldValue) != 0 { 110 | dataChanged = true 111 | } 112 | } else { 113 | dataChanged = true 114 | } 115 | targetCopy.BinaryData[key] = newValue 116 | 117 | replicatedKeys = append(replicatedKeys, key) 118 | delete(prevKeys, key) 119 | } 120 | } 121 | 122 | if hasPrevKeys { 123 | for k := range prevKeys { 124 | logger.Debugf("removing previously present key %s: not present in source any more", k) 125 | delete(targetCopy.Data, k) 126 | delete(targetCopy.BinaryData, k) 127 | dataChanged = true 128 | } 129 | } 130 | 131 | if !dataChanged { 132 | logger.Debugf("target values of %s are already up-to-date", common.MustGetKey(target)) 133 | 
return nil 134 | } 135 | 136 | sort.Strings(replicatedKeys) 137 | 138 | logger.Infof("updating config map %s/%s", target.Namespace, target.Name) 139 | 140 | targetCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 141 | targetCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 142 | targetCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",") 143 | 144 | s, err := r.Client.CoreV1().ConfigMaps(target.Namespace).Update(context.TODO(), targetCopy, metav1.UpdateOptions{}) 145 | if err != nil { 146 | err = errors.Wrapf(err, "Failed updating target %s/%s", target.Namespace, targetCopy.Name) 147 | } else if err = r.Store.Update(s); err != nil { 148 | err = errors.Wrapf(err, "Failed to update cache for %s/%s: %v", target.Namespace, targetCopy, err) 149 | } 150 | 151 | return err 152 | } 153 | 154 | // ReplicateObjectTo copies the whole object to target namespace 155 | func (r *Replicator) ReplicateObjectTo(sourceObj interface{}, target *v1.Namespace) error { 156 | source := sourceObj.(*v1.ConfigMap) 157 | targetLocation := fmt.Sprintf("%s/%s", target.Name, source.Name) 158 | 159 | logger := log. 160 | WithField("kind", r.Kind). 161 | WithField("source", common.MustGetKey(source)). 162 | WithField("target", targetLocation) 163 | 164 | targetResource, exists, err := r.Store.GetByKey(targetLocation) 165 | if err != nil { 166 | return errors.Wrapf(err, "Could not get %s from cache!", targetLocation) 167 | } 168 | logger.Infof("Checking if %s exists? 
%v", targetLocation, exists) 169 | 170 | var resourceCopy *v1.ConfigMap 171 | if exists { 172 | targetObject := targetResource.(*v1.ConfigMap) 173 | targetVersion, ok := targetObject.Annotations[common.ReplicatedFromVersionAnnotation] 174 | sourceVersion := source.ResourceVersion 175 | 176 | if ok && targetVersion == sourceVersion { 177 | logger.Debugf("Secret %s is already up-to-date", common.MustGetKey(targetObject)) 178 | return nil 179 | } 180 | 181 | resourceCopy = targetObject.DeepCopy() 182 | } else { 183 | resourceCopy = new(v1.ConfigMap) 184 | } 185 | 186 | keepOwnerReferences, ok := source.Annotations[common.KeepOwnerReferences] 187 | if ok && keepOwnerReferences == "true" { 188 | resourceCopy.OwnerReferences = source.OwnerReferences 189 | } 190 | 191 | if resourceCopy.Data == nil { 192 | resourceCopy.Data = make(map[string]string) 193 | } 194 | if resourceCopy.BinaryData == nil { 195 | resourceCopy.BinaryData = make(map[string][]byte) 196 | } 197 | if resourceCopy.Annotations == nil { 198 | resourceCopy.Annotations = make(map[string]string) 199 | } 200 | 201 | prevKeys, hasPrevKeys := common.PreviouslyPresentKeys(&resourceCopy.ObjectMeta) 202 | replicatedKeys := make([]string, 0) 203 | 204 | for key, value := range source.Data { 205 | resourceCopy.Data[key] = value 206 | 207 | replicatedKeys = append(replicatedKeys, key) 208 | delete(prevKeys, key) 209 | } 210 | for key, value := range source.BinaryData { 211 | newValue := make([]byte, len(value)) 212 | copy(newValue, value) 213 | resourceCopy.BinaryData[key] = newValue 214 | 215 | replicatedKeys = append(replicatedKeys, key) 216 | delete(prevKeys, key) 217 | } 218 | 219 | if hasPrevKeys { 220 | for k := range prevKeys { 221 | logger.Debugf("removing previously present key %s: not present in source secret any more", k) 222 | delete(resourceCopy.Data, k) 223 | } 224 | } 225 | 226 | labelsCopy := make(map[string]string) 227 | 228 | stripLabels, ok := source.Annotations[common.StripLabels] 229 | if !ok && 
stripLabels != "true" { 230 | if source.Labels != nil { 231 | for key, value := range source.Labels { 232 | labelsCopy[key] = value 233 | } 234 | } 235 | } 236 | 237 | sort.Strings(replicatedKeys) 238 | resourceCopy.Name = source.Name 239 | resourceCopy.Labels = labelsCopy 240 | resourceCopy.Annotations[common.ReplicatedAtAnnotation] = time.Now().Format(time.RFC3339) 241 | resourceCopy.Annotations[common.ReplicatedFromVersionAnnotation] = source.ResourceVersion 242 | resourceCopy.Annotations[common.ReplicatedKeysAnnotation] = strings.Join(replicatedKeys, ",") 243 | 244 | var obj interface{} 245 | if exists { 246 | logger.Debugf("Updating existing secret %s/%s", target.Name, resourceCopy.Name) 247 | obj, err = r.Client.CoreV1().ConfigMaps(target.Name).Update(context.TODO(), resourceCopy, metav1.UpdateOptions{}) 248 | } else { 249 | logger.Debugf("Creating a new secret secret %s/%s", target.Name, resourceCopy.Name) 250 | obj, err = r.Client.CoreV1().ConfigMaps(target.Name).Create(context.TODO(), resourceCopy, metav1.CreateOptions{}) 251 | } 252 | if err != nil { 253 | return errors.Wrapf(err, "Failed to update secret %s/%s", target.Name, resourceCopy.Name) 254 | } 255 | 256 | if err := r.Store.Update(obj); err != nil { 257 | return errors.Wrapf(err, "Failed to update cache for %s/%s", target.Name, resourceCopy) 258 | } 259 | 260 | return nil 261 | } 262 | 263 | func (r *Replicator) PatchDeleteDependent(sourceKey string, target interface{}) (interface{}, error) { 264 | dependentKey := common.MustGetKey(target) 265 | logger := log.WithFields(log.Fields{ 266 | "kind": r.Kind, 267 | "source": sourceKey, 268 | "target": dependentKey, 269 | }) 270 | 271 | targetObject, ok := target.(*v1.ConfigMap) 272 | if !ok { 273 | err := errors.Errorf("bad type returned from Store: %T", target) 274 | return nil, err 275 | } 276 | 277 | patch := []common.JSONPatchOperation{{Operation: "remove", Path: "/data"}} 278 | patchBody, err := json.Marshal(&patch) 279 | 280 | if err != nil { 281 
| return nil, errors.Wrapf(err, "error while building patch body for confimap %s: %v", dependentKey, err) 282 | 283 | } 284 | 285 | logger.Debugf("clearing dependent config map %s", dependentKey) 286 | logger.Tracef("patch body: %s", string(patchBody)) 287 | 288 | s, err := r.Client.CoreV1().ConfigMaps(targetObject.Namespace).Patch(context.TODO(), targetObject.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}) 289 | if err != nil { 290 | return nil, errors.Wrapf(err, "error while patching secret %s: %v", dependentKey, err) 291 | 292 | } 293 | 294 | return s, nil 295 | } 296 | 297 | // DeleteReplicatedResource deletes a resource replicated by ReplicateTo annotation 298 | func (r *Replicator) DeleteReplicatedResource(targetResource interface{}) error { 299 | targetLocation := common.MustGetKey(targetResource) 300 | logger := log.WithFields(log.Fields{ 301 | "kind": r.Kind, 302 | "target": targetLocation, 303 | }) 304 | 305 | object := targetResource.(*v1.ConfigMap) 306 | resourceKeys := make([]string, 0) 307 | resourceKeys = append(resourceKeys, common.GetKeysFromBinaryMap(object.BinaryData)...) 308 | resourceKeys = append(resourceKeys, common.GetKeysFromStringMap(object.Data)...) 
309 | sort.Strings(resourceKeys) 310 | 311 | if strings.Join(resourceKeys, ",") == object.Annotations[common.ReplicatedKeysAnnotation] { 312 | logger.Debugf("Deleting %s", targetLocation) 313 | if err := r.Client.CoreV1().ConfigMaps(object.Namespace).Delete(context.TODO(), object.Name, metav1.DeleteOptions{}); err != nil { 314 | return errors.Wrapf(err, "Failed deleting %s: %v", targetLocation, err) 315 | } 316 | } else { 317 | var patch []common.JSONPatchOperation 318 | exists := make(map[string]struct{}) 319 | for _, value := range resourceKeys { 320 | exists[value] = struct{}{} 321 | } 322 | for _, val := range strings.Split(object.Annotations[common.ReplicatedKeysAnnotation], ",") { 323 | if _, ok := exists[val]; ok { 324 | patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/data/%s", val)}) 325 | } 326 | } 327 | patch = append(patch, common.JSONPatchOperation{Operation: "remove", Path: fmt.Sprintf("/metadata/annotations/%s", common.JSONPatchPathEscape(common.ReplicatedKeysAnnotation))}) 328 | 329 | patchBody, err := json.Marshal(&patch) 330 | if err != nil { 331 | return errors.Wrapf(err, "error while building patch body for confimap %s: %v", object, err) 332 | } 333 | 334 | s, err := r.Client.CoreV1().ConfigMaps(object.Namespace).Patch(context.TODO(), object.Name, types.JSONPatchType, patchBody, metav1.PatchOptions{}) 335 | if err != nil { 336 | return errors.Wrapf(err, "error while patching secret %s: %v", s, err) 337 | 338 | } 339 | 340 | logger.Debugf("Not deleting %s since it contains other keys then replicated.", targetLocation) 341 | } 342 | 343 | return nil 344 | } 345 | -------------------------------------------------------------------------------- /replicate/role/roles_test.go: -------------------------------------------------------------------------------- 1 | package role 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | rbacv1 "k8s.io/api/rbac/v1" 8 | "k8s.io/apimachinery/pkg/types" 9 | "os" 10 | 
"path/filepath"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/mittwald/kubernetes-replicator/replicate/common"
	pkgerrors "github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"github.com/stretchr/testify/require"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

// namespacePrefix returns a timestamped namespace-name prefix so that
// repeated test runs do not collide on namespace names.
func namespacePrefix() string {
	// reference layout: Mon Jan 2 15:04:05 -0700 MST 2006
	return "test-repl-" + time.Now().Format("060102150405") + "-"
}

// EventHandlerFuncs bundles informer callbacks that additionally receive a
// WaitGroup, so tests can block until an expected number of events arrived.
type EventHandlerFuncs struct {
	AddFunc    func(wg *sync.WaitGroup, obj interface{})
	UpdateFunc func(wg *sync.WaitGroup, oldObj, newObj interface{})
	DeleteFunc func(wg *sync.WaitGroup, obj interface{})
}

// PlainFormatter renders logrus entries as "HH:MM:SS LEVEL    message" lines,
// appending the entry's error field (if any) on its own line.
type PlainFormatter struct {
}

// Format implements the logrus.Formatter interface.
func (pf *PlainFormatter) Format(entry *log.Entry) ([]byte, error) {
	// Reuse the entry's buffer when logrus provides one.
	var b *bytes.Buffer
	if entry.Buffer != nil {
		b = entry.Buffer
	} else {
		b = &bytes.Buffer{}
	}

	b.WriteString(entry.Time.Format("15:04:05") + " ")
	// left-pad the level name to a fixed width of 8 characters
	b.WriteString(fmt.Sprintf("%-8s", strings.ToUpper(entry.Level.String())))
	b.WriteString(entry.Message)

	// If the entry carries an error, print it on the following line.
	if val, ok := entry.Data[log.ErrorKey]; ok {
		b.WriteByte('\n')
		b.WriteString(fmt.Sprint(val))
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}

// TestRoleReplicator is an integration test: it requires a reachable cluster
// (via KUBECONFIG or the local/in-cluster config) and exercises the role
// replicator end-to-end.
func TestRoleReplicator(t *testing.T) {

	log.SetLevel(log.TraceLevel)
	log.SetFormatter(&PlainFormatter{})

	kubeconfig := os.Getenv("KUBECONFIG")
	// if KUBECONFIG is not specified, try the local kubeconfig or the in-cluster config
	if len(kubeconfig) == 0 {
		if home := homeDir(); home != "" && home != "/root" {
			kubeconfig = filepath.Join(home, ".kube", "config")
		}
	}

	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
| require.NoError(t, err) 78 | 79 | prefix := namespacePrefix() 80 | client := kubernetes.NewForConfigOrDie(config) 81 | 82 | repl := NewReplicator(client, 60*time.Second, false) 83 | go repl.Run() 84 | 85 | time.Sleep(200 * time.Millisecond) 86 | 87 | ns := corev1.Namespace{ 88 | ObjectMeta: metav1.ObjectMeta{ 89 | Name: prefix + "test", 90 | }, 91 | } 92 | _, err = client.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{}) 93 | require.NoError(t, err) 94 | 95 | ns2 := corev1.Namespace{ 96 | ObjectMeta: metav1.ObjectMeta{ 97 | Name: prefix + "test2", 98 | Labels: map[string]string{ 99 | "foo": "bar", 100 | }}, 101 | } 102 | _, err = client.CoreV1().Namespaces().Create(context.TODO(), &ns2, metav1.CreateOptions{}) 103 | require.NoError(t, err) 104 | 105 | defer func() { 106 | _ = client.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}) 107 | _ = client.CoreV1().Namespaces().Delete(context.TODO(), ns2.Name, metav1.DeleteOptions{}) 108 | }() 109 | 110 | roles := client.RbacV1().Roles(prefix + "test") 111 | 112 | const MaxWaitTime = 1000 * time.Millisecond 113 | t.Run("replicates from existing role", func(t *testing.T) { 114 | source := rbacv1.Role{ 115 | ObjectMeta: metav1.ObjectMeta{ 116 | Name: "source", 117 | Namespace: ns.Name, 118 | Annotations: map[string]string{ 119 | common.ReplicationAllowed: "true", 120 | common.ReplicationAllowedNamespaces: ns.Name, 121 | }, 122 | }, 123 | Rules: []rbacv1.PolicyRule{{ 124 | APIGroups: []string{""}, 125 | Resources: []string{"pods"}, 126 | Verbs: []string{"list", "get", "watch"}, 127 | }}, 128 | } 129 | 130 | target := rbacv1.Role{ 131 | ObjectMeta: metav1.ObjectMeta{ 132 | Name: "target", 133 | Namespace: ns.Name, 134 | Annotations: map[string]string{ 135 | common.ReplicateFromAnnotation: common.MustGetKey(&source), 136 | }, 137 | }, 138 | } 139 | 140 | wg, stop := waitForRoles(client, 3, EventHandlerFuncs{ 141 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 142 | 
role := obj.(*rbacv1.Role) 143 | if role.Namespace == source.Namespace && role.Name == source.Name { 144 | log.Debugf("AddFunc %+v", obj) 145 | wg.Done() 146 | } else if role.Namespace == target.Namespace && role.Name == target.Name { 147 | log.Debugf("AddFunc %+v", obj) 148 | wg.Done() 149 | } 150 | }, 151 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 152 | role := oldObj.(*rbacv1.Role) 153 | if role.Namespace == target.Namespace && role.Name == target.Name { 154 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 155 | wg.Done() 156 | } 157 | }, 158 | }) 159 | 160 | _, err := roles.Create(context.TODO(), &source, metav1.CreateOptions{}) 161 | require.NoError(t, err) 162 | 163 | _, err = roles.Create(context.TODO(), &target, metav1.CreateOptions{}) 164 | require.NoError(t, err) 165 | 166 | waitWithTimeout(wg, MaxWaitTime) 167 | close(stop) 168 | 169 | updTarget, err := roles.Get(context.TODO(), target.Name, metav1.GetOptions{}) 170 | require.NoError(t, err) 171 | require.EqualValues(t, source.Rules, updTarget.Rules) 172 | }) 173 | 174 | t.Run("replication is pushed to other namespaces", func(t *testing.T) { 175 | source := rbacv1.Role{ 176 | ObjectMeta: metav1.ObjectMeta{ 177 | Name: "source-pushed-to-other-ns", 178 | Namespace: ns.Name, 179 | Annotations: map[string]string{ 180 | common.ReplicateTo: prefix + "test2", 181 | }, 182 | }, 183 | Rules: []rbacv1.PolicyRule{{ 184 | APIGroups: []string{""}, 185 | Resources: []string{"pods"}, 186 | Verbs: []string{"list", "get", "watch"}, 187 | }}, 188 | } 189 | 190 | wg, stop := waitForRoles(client, 2, EventHandlerFuncs{ 191 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 192 | role := obj.(*rbacv1.Role) 193 | if role.Namespace == source.Namespace && role.Name == source.Name { 194 | log.Debugf("AddFunc %+v", obj) 195 | wg.Done() 196 | } else if role.Namespace == prefix+"test2" && role.Name == source.Name { 197 | log.Debugf("AddFunc %+v", obj) 198 | wg.Done() 199 | } 200 | }, 
201 | }) 202 | _, err := roles.Create(context.TODO(), &source, metav1.CreateOptions{}) 203 | require.NoError(t, err) 204 | 205 | waitWithTimeout(wg, MaxWaitTime) 206 | close(stop) 207 | 208 | roles2 := client.RbacV1().Roles(prefix + "test2") 209 | updTarget, err := roles2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 210 | 211 | require.NoError(t, err) 212 | require.EqualValues(t, source.Rules, updTarget.Rules) 213 | 214 | wg, stop = waitForRoles(client, 2, EventHandlerFuncs{ 215 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 216 | role := oldObj.(*rbacv1.Role) 217 | if role.Namespace == prefix+"test2" && role.Name == source.Name { 218 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 219 | wg.Done() 220 | } 221 | }, 222 | }) 223 | 224 | _, err = roles.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/rules/0"}]`), metav1.PatchOptions{}) 225 | require.NoError(t, err) 226 | 227 | waitWithTimeout(wg, MaxWaitTime) 228 | close(stop) 229 | 230 | updTarget, err = roles2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 231 | require.NoError(t, err) 232 | 233 | require.Len(t, updTarget.Rules, 0) 234 | }) 235 | 236 | t.Run("roles are replicated when new namespace is created", func(t *testing.T) { 237 | namespaceName := prefix + "test-repl-new-ns" 238 | source := rbacv1.Role{ 239 | ObjectMeta: metav1.ObjectMeta{ 240 | Name: "source6", 241 | Namespace: ns.Name, 242 | Annotations: map[string]string{ 243 | common.ReplicateTo: namespaceName, 244 | }, 245 | }, 246 | Rules: []rbacv1.PolicyRule{{ 247 | APIGroups: []string{""}, 248 | Resources: []string{"pods"}, 249 | Verbs: []string{"list", "get", "watch"}, 250 | }}, 251 | } 252 | 253 | wg, stop := waitForRoles(client, 1, EventHandlerFuncs{ 254 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 255 | role := obj.(*rbacv1.Role) 256 | if role.Namespace == source.Namespace && role.Name == source.Name { 257 | log.Debugf("AddFunc %+v", obj) 
258 | wg.Done() 259 | } 260 | }, 261 | }) 262 | 263 | _, err := roles.Create(context.TODO(), &source, metav1.CreateOptions{}) 264 | require.NoError(t, err) 265 | 266 | waitWithTimeout(wg, MaxWaitTime) 267 | close(stop) 268 | 269 | ns3 := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}} 270 | 271 | wg, stop = waitForNamespaces(client, 1, EventHandlerFuncs{ 272 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 273 | ns := obj.(*corev1.Namespace) 274 | if ns.Name == ns3.Name { 275 | log.Debugf("AddFunc %+v", obj) 276 | wg.Done() 277 | } 278 | }, 279 | }) 280 | 281 | wg2, stop2 := waitForRoles(client, 1, EventHandlerFuncs{ 282 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 283 | role := obj.(*rbacv1.Role) 284 | if role.Namespace == ns3.Name && role.Name == source.Name { 285 | log.Debugf("AddFunc %+v", obj) 286 | wg.Done() 287 | } 288 | }, 289 | }) 290 | 291 | _, err = client.CoreV1().Namespaces().Create(context.TODO(), &ns3, metav1.CreateOptions{}) 292 | require.NoError(t, err) 293 | 294 | defer func() { 295 | _ = client.CoreV1().Namespaces().Delete(context.TODO(), ns3.Name, metav1.DeleteOptions{}) 296 | }() 297 | 298 | waitWithTimeout(wg, MaxWaitTime) 299 | close(stop) 300 | 301 | waitWithTimeout(wg2, MaxWaitTime) 302 | close(stop2) 303 | 304 | roles3 := client.RbacV1().Roles(namespaceName) 305 | updTarget, err := roles3.Get(context.TODO(), source.Name, metav1.GetOptions{}) 306 | require.NoError(t, err) 307 | require.EqualValues(t, source.Rules, updTarget.Rules) 308 | 309 | wg, stop = waitForRoles(client, 1, EventHandlerFuncs{ 310 | UpdateFunc: func(wg *sync.WaitGroup, objOld interface{}, objNew interface{}) { 311 | role := objOld.(*rbacv1.Role) 312 | if role.Namespace == ns3.Name && role.Name == source.Name { 313 | log.Debugf("UpdateFunc %+v", objOld) 314 | wg.Done() 315 | } 316 | }, 317 | }) 318 | _, err = roles.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/rules/0"}]`), 
metav1.PatchOptions{}) 319 | require.NoError(t, err) 320 | 321 | waitWithTimeout(wg, MaxWaitTime) 322 | close(stop) 323 | 324 | updTarget, err = roles3.Get(context.TODO(), source.Name, metav1.GetOptions{}) 325 | require.NoError(t, err) 326 | 327 | require.Len(t, updTarget.Rules, 0) 328 | }) 329 | 330 | } 331 | 332 | func waitForNamespaces(client *kubernetes.Clientset, count int, eventHandlers EventHandlerFuncs) (wg *sync.WaitGroup, stop chan struct{}) { 333 | wg = &sync.WaitGroup{} 334 | wg.Add(count) 335 | informerFactory := informers.NewSharedInformerFactory(client, 60*time.Second) 336 | informer := informerFactory.Core().V1().Namespaces().Informer() 337 | informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ 338 | AddFunc: func(obj interface{}) { 339 | if eventHandlers.AddFunc != nil { 340 | eventHandlers.AddFunc(wg, obj) 341 | } 342 | }, 343 | UpdateFunc: func(oldObj, newObj interface{}) { 344 | if eventHandlers.UpdateFunc != nil { 345 | eventHandlers.UpdateFunc(wg, oldObj, newObj) 346 | } 347 | 348 | }, 349 | DeleteFunc: func(obj interface{}) { 350 | if eventHandlers.DeleteFunc != nil { 351 | eventHandlers.DeleteFunc(wg, obj) 352 | } 353 | }, 354 | }) 355 | stop = make(chan struct{}) 356 | go informerFactory.Start(stop) 357 | 358 | return 359 | 360 | } 361 | 362 | func waitForRoles(client *kubernetes.Clientset, count int, eventHandlers EventHandlerFuncs) (wg *sync.WaitGroup, stop chan struct{}) { 363 | wg = &sync.WaitGroup{} 364 | wg.Add(count) 365 | informerFactory := informers.NewSharedInformerFactory(client, 60*time.Second) 366 | informer := informerFactory.Rbac().V1().Roles().Informer() 367 | informer.AddEventHandler(cache.ResourceEventHandlerFuncs{ 368 | AddFunc: func(obj interface{}) { 369 | if eventHandlers.AddFunc != nil { 370 | eventHandlers.AddFunc(wg, obj) 371 | } 372 | }, 373 | UpdateFunc: func(oldObj, newObj interface{}) { 374 | if eventHandlers.UpdateFunc != nil { 375 | eventHandlers.UpdateFunc(wg, oldObj, newObj) 376 | } 377 | 378 | }, 379 | 
DeleteFunc: func(obj interface{}) { 380 | if eventHandlers.DeleteFunc != nil { 381 | eventHandlers.DeleteFunc(wg, obj) 382 | } 383 | }, 384 | }) 385 | stop = make(chan struct{}) 386 | go informerFactory.Start(stop) 387 | 388 | return 389 | 390 | } 391 | 392 | func waitWithTimeout(wg *sync.WaitGroup, timeout time.Duration) { 393 | done := make(chan struct{}) 394 | go func() { 395 | wg.Wait() 396 | close(done) 397 | }() 398 | 399 | select { 400 | case <-done: 401 | return 402 | case <-time.After(timeout): 403 | err := pkgerrors.Errorf("Timeout hit") 404 | log.WithError(err).Debugf("Wait timed out") 405 | } 406 | } 407 | 408 | func homeDir() string { 409 | if h := os.Getenv("HOME"); h != "" { 410 | return h 411 | } 412 | return os.Getenv("USERPROFILE") // windows 413 | } 414 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 2 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 3 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 4 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 5 | github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= 6 | github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 7 | github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= 8 | github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= 9 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 10 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 11 | github.com/go-openapi/jsonpointer v0.21.1 
h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic= 12 | github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk= 13 | github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= 14 | github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= 15 | github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU= 16 | github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0= 17 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= 18 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 19 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 20 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 21 | github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= 22 | github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= 23 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 24 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 25 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 26 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 27 | github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= 28 | github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= 29 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 30 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 31 | github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 32 | github.com/hashicorp/errwrap v1.1.0 
h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= 33 | github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= 34 | github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= 35 | github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= 36 | github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 37 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 38 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 39 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 40 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 41 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 42 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 43 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 44 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 45 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 46 | github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= 47 | github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= 48 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 49 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 50 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 51 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 52 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 53 | 
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 54 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 55 | github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= 56 | github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= 57 | github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= 58 | github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= 59 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 60 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 61 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 62 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 63 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 64 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 65 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 66 | github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= 67 | github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= 68 | github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= 69 | github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 70 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 71 | github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= 72 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 73 | github.com/stretchr/testify v1.3.0/go.mod 
h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 74 | github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 75 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 76 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 77 | github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= 78 | github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= 79 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 80 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 81 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 82 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 83 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 84 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 85 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 86 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 87 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 88 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 89 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 90 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 91 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 92 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= 93 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 94 | 
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= 95 | golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= 96 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 97 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 98 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 99 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 100 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 101 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 102 | golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 103 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 104 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 105 | golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= 106 | golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= 107 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 108 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 109 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 110 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 111 | golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= 112 | golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= 113 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 114 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 115 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 116 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 117 | golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= 118 | golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= 119 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 120 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 121 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 122 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 123 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 124 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 125 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 126 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 127 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 128 | gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= 129 | gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= 130 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 131 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 132 | gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 133 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 134 | 
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 135 | k8s.io/api v0.33.3 h1:SRd5t//hhkI1buzxb288fy2xvjubstenEKL9K51KBI8= 136 | k8s.io/api v0.33.3/go.mod h1:01Y/iLUjNBM3TAvypct7DIj0M0NIZc+PzAHCIo0CYGE= 137 | k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA= 138 | k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= 139 | k8s.io/client-go v0.33.3 h1:M5AfDnKfYmVJif92ngN532gFqakcGi6RvaOF16efrpA= 140 | k8s.io/client-go v0.33.3/go.mod h1:luqKBQggEf3shbxHY4uVENAxrDISLOarxpTKMiUuujg= 141 | k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= 142 | k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 143 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= 144 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= 145 | k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= 146 | k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 147 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= 148 | sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= 149 | sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 150 | sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= 151 | sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 152 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= 153 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= 154 | sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= 155 | sigs.k8s.io/yaml v1.4.0/go.mod 
h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= 156 | -------------------------------------------------------------------------------- /replicate/common/generic-replicator.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "reflect" 7 | "regexp" 8 | "strconv" 9 | "strings" 10 | "time" 11 | 12 | "k8s.io/apimachinery/pkg/labels" 13 | 14 | "github.com/hashicorp/go-multierror" 15 | "github.com/pkg/errors" 16 | log "github.com/sirupsen/logrus" 17 | v1 "k8s.io/api/core/v1" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | "k8s.io/apimachinery/pkg/runtime" 20 | "k8s.io/apimachinery/pkg/util/wait" 21 | "k8s.io/client-go/kubernetes" 22 | "k8s.io/client-go/tools/cache" 23 | ) 24 | 25 | type ReplicatorConfig struct { 26 | Kind string 27 | Client kubernetes.Interface 28 | ResyncPeriod time.Duration 29 | AllowAll bool 30 | SyncByContent bool 31 | ListFunc cache.ListFunc 32 | WatchFunc cache.WatchFunc 33 | ObjType runtime.Object 34 | NamespaceFilter *NamespaceFilter 35 | } 36 | 37 | type UpdateFuncs struct { 38 | ReplicateDataFrom func(source interface{}, target interface{}) error 39 | ReplicateObjectTo func(source interface{}, target *v1.Namespace) error 40 | PatchDeleteDependent func(sourceKey string, target interface{}) (interface{}, error) 41 | DeleteReplicatedResource func(target interface{}) error 42 | } 43 | 44 | type GenericReplicator struct { 45 | ReplicatorConfig 46 | Store cache.Store 47 | Controller cache.Controller 48 | 49 | DependencyMap map[string]map[string]interface{} 50 | DependentMap map[string]string 51 | UpdateFuncs UpdateFuncs 52 | 53 | // ReplicateToList is a set that caches the names of all secrets that have a 54 | // "replicate-to" annotation. 55 | ReplicateToList GenericMap[string, struct{}] 56 | 57 | // ReplicateToMatchingList is a set that caches the names of all secrets 58 | // that have a "replicate-to-matching" annotation. 
59 | ReplicateToMatchingList GenericMap[string, labels.Selector] 60 | } 61 | 62 | // NewGenericReplicator creates a new generic replicator 63 | func NewGenericReplicator(config ReplicatorConfig) *GenericReplicator { 64 | repl := GenericReplicator{ 65 | ReplicatorConfig: config, 66 | DependencyMap: make(map[string]map[string]interface{}), 67 | DependentMap: make(map[string]string), 68 | ReplicateToList: GenericMap[string, struct{}]{}, 69 | ReplicateToMatchingList: GenericMap[string, labels.Selector]{}, 70 | } 71 | 72 | store, controller := cache.NewInformer( 73 | &cache.ListWatch{ 74 | ListFunc: config.ListFunc, 75 | WatchFunc: config.WatchFunc, 76 | }, 77 | config.ObjType, 78 | config.ResyncPeriod, 79 | cache.ResourceEventHandlerFuncs{ 80 | AddFunc: repl.ResourceAdded, 81 | UpdateFunc: func(old interface{}, new interface{}) { repl.ResourceAdded(new) }, 82 | DeleteFunc: repl.ResourceDeleted, 83 | }, 84 | ) 85 | 86 | namespaceWatcher.OnNamespaceAdded(config.Client, config.ResyncPeriod, repl.NamespaceAdded) 87 | namespaceWatcher.OnNamespaceUpdated(config.Client, config.ResyncPeriod, repl.NamespaceUpdated) 88 | 89 | repl.Store = store 90 | repl.Controller = controller 91 | 92 | repl.NamespaceFilter = config.NamespaceFilter 93 | 94 | return &repl 95 | } 96 | 97 | // IsReplicationPermitted checks if replication is allowed in annotations of the source object 98 | // Returns true if replication is allowed. If replication is not allowed returns false with 99 | // error message 100 | func (r *GenericReplicator) IsReplicationPermitted(object *metav1.ObjectMeta, sourceObject *metav1.ObjectMeta) (bool, error) { 101 | if r.AllowAll { 102 | return true, nil 103 | } 104 | 105 | // make sure source object allows replication 106 | annotationAllowed, ok := sourceObject.Annotations[ReplicationAllowed] 107 | if !ok { 108 | return false, fmt.Errorf("source %s/%s does not allow replication. 
%s will not be replicated", 109 | sourceObject.Namespace, sourceObject.Name, object.Name) 110 | } 111 | annotationAllowedBool, err := strconv.ParseBool(annotationAllowed) 112 | 113 | // check if source object allows replication 114 | if err != nil || !annotationAllowedBool { 115 | return false, fmt.Errorf("source %s/%s does not allow replication. %s will not be replicated", 116 | sourceObject.Namespace, sourceObject.Name, object.Name) 117 | } 118 | 119 | // check if the target namespace is permitted 120 | annotationAllowedNamespaces, ok := sourceObject.Annotations[ReplicationAllowedNamespaces] 121 | if !ok { 122 | return false, fmt.Errorf( 123 | "source %s/%s does not allow replication (%s annotation missing). %s will not be replicated", 124 | sourceObject.Namespace, sourceObject.Name, ReplicationAllowedNamespaces, object.Name) 125 | } 126 | allowedNamespaces := strings.Split(annotationAllowedNamespaces, ",") 127 | allowed := false 128 | for _, ns := range allowedNamespaces { 129 | ns := BuildStrictRegex(ns) 130 | 131 | if matched, _ := regexp.MatchString(ns, object.Namespace); matched { 132 | log.Tracef("Namespace '%s' matches '%s' -- allowing replication", object.Namespace, ns) 133 | allowed = true 134 | break 135 | } 136 | } 137 | 138 | err = nil 139 | if !allowed { 140 | err = fmt.Errorf( 141 | "source %s/%s does not allow replication in namespace %s. %s will not be replicated", 142 | sourceObject.Namespace, sourceObject.Name, object.Namespace, object.Name) 143 | } 144 | return allowed, err 145 | } 146 | 147 | func (r *GenericReplicator) Synced() bool { 148 | return r.Controller.HasSynced() 149 | } 150 | 151 | func (r *GenericReplicator) Run() { 152 | log.WithField("kind", r.Kind).Infof("running %s controller", r.Kind) 153 | r.Controller.Run(wait.NeverStop) 154 | } 155 | 156 | // NamespaceAdded replicates resources with ReplicateTo and ReplicateToMatching 157 | // annotations into newly created namespaces. 
158 | func (r *GenericReplicator) NamespaceAdded(ns *v1.Namespace) { 159 | logger := log.WithField("kind", r.Kind).WithField("target", ns.Name) 160 | r.ReplicateToList.Range(func(sourceKey string, _ struct{}) bool { 161 | logger := logger.WithField("resource", sourceKey) 162 | obj, exists, err := r.Store.GetByKey(sourceKey) 163 | 164 | if err != nil { 165 | log.WithError(err).Error("error fetching object from store") 166 | return true 167 | } else if !exists { 168 | log.Warn("object not found in store") 169 | return true 170 | } 171 | 172 | objectMeta := MustGetObject(obj) 173 | replicatedList := make([]string, 0) 174 | namespacePatterns, found := objectMeta.GetAnnotations()[ReplicateTo] 175 | if found { 176 | if err := r.replicateResourceToMatchingNamespaces(obj, namespacePatterns, []v1.Namespace{*ns}); err != nil { 177 | logger. 178 | WithError(err). 179 | Errorf("Failed replicating the resource to the new namespace %s: %v", ns.Name, err) 180 | } else { 181 | replicatedList = append(replicatedList, ns.Name) 182 | } 183 | 184 | } 185 | 186 | return true 187 | }) 188 | 189 | namespaceLabels := labels.Set(ns.Labels) 190 | r.ReplicateToMatchingList.Range(func(sourceKey string, selector labels.Selector) bool { 191 | logger := logger.WithField("resource", sourceKey) 192 | 193 | obj, exists, err := r.Store.GetByKey(sourceKey) 194 | if err != nil { 195 | log.WithError(err).Error("error fetching object from store") 196 | return true 197 | } else if !exists { 198 | log.Warn("object not found in store") 199 | return true 200 | } 201 | 202 | if !selector.Matches(namespaceLabels) { 203 | return true 204 | } 205 | 206 | if _, err := r.replicateResourceToNamespaces(obj, []v1.Namespace{*ns}); err != nil { 207 | logger.WithError(err).Error("error while replicating object to namespace") 208 | } 209 | return true 210 | }) 211 | } 212 | 213 | // NamespaceUpdated checks if namespace's labels changed and deletes any 'replicate-to-matching' resources 214 | // the namespace no longer 
qualifies for. Then it attempts to replicate resources into the updated ns based 215 | // on the updated set of labels 216 | func (r *GenericReplicator) NamespaceUpdated(nsOld *v1.Namespace, nsNew *v1.Namespace) { 217 | logger := log.WithField("kind", r.Kind).WithField("target", nsNew.Name) 218 | // check if labels changed 219 | if reflect.DeepEqual(nsNew.Labels, nsOld.Labels) { 220 | logger.Debug("labels didn't change") 221 | return 222 | } else { 223 | logger.Infof("labels of namespace %s changed, attempting to delete %ss that no longer match", nsNew.Name, r.Kind) 224 | // delete any resources where namespace labels no longer match 225 | var newLabelSet labels.Set 226 | newLabelSet = nsNew.Labels 227 | var oldLabelSet labels.Set 228 | oldLabelSet = nsOld.Labels 229 | // check 'replicate-to-matching' resources against new labels 230 | r.ReplicateToMatchingList.Range(func(sourceKey string, selector labels.Selector) bool { 231 | if selector.Matches(oldLabelSet) && !selector.Matches(newLabelSet) { 232 | obj, exists, err := r.Store.GetByKey(sourceKey) 233 | if err != nil { 234 | log.WithError(err).Error("error fetching object from store") 235 | return true 236 | } else if !exists { 237 | log.Warn("object not found in store") 238 | return true 239 | } 240 | // delete resource from the updated namespace 241 | logger.Infof("removed %s %s from %s", r.Kind, sourceKey, nsNew.Name) 242 | r.DeleteResourceInNamespaces(obj, &v1.NamespaceList{Items: []v1.Namespace{*nsNew}}) 243 | } 244 | return true 245 | }) 246 | 247 | // replicate resources to updated ns 248 | logger.Infof("labels of namespace %s changed, attempting to replicate %ss", nsNew.Name, r.Kind) 249 | r.NamespaceAdded(nsNew) 250 | } 251 | } 252 | 253 | // ResourceAdded checks resources with ReplicateTo or ReplicateFromAnnotation annotation 254 | func (r *GenericReplicator) ResourceAdded(obj interface{}) { 255 | objectMeta := MustGetObject(obj) 256 | sourceKey := MustGetKey(objectMeta) 257 | logger := 
log.WithField("kind", r.Kind).WithField("resource", sourceKey) 258 | 259 | ctx := context.Background() 260 | 261 | if replicas, ok := r.DependencyMap[sourceKey]; ok { 262 | logger.Debugf("objectMeta %s has %d dependents", sourceKey, len(replicas)) 263 | if err := r.updateDependents(obj, replicas); err != nil { 264 | logger.WithError(err).Error("failed to update cache") 265 | } 266 | } 267 | source, ok := r.DependentMap[sourceKey] 268 | if ok { 269 | logger.Debugf("objectMeta %s has source %s", sourceKey, source) 270 | 271 | sourceObject, exists, err := r.Store.GetByKey(source) 272 | if err != nil { 273 | logger.Debugf("could not get source %s %s: %s", r.Kind, source, err) 274 | return 275 | } else if !exists { 276 | logger.Debugf("could not get source %s %s: does not exist", r.Kind, source) 277 | return 278 | } 279 | targetMap := map[string]interface{}{MustGetKey(obj): ""} 280 | if err := r.updateDependents(sourceObject, targetMap); err != nil { 281 | logger.WithError(err). 282 | Errorf("Failed to update cache for %s: %v", MustGetKey(objectMeta), err) 283 | } 284 | } 285 | 286 | annotations := objectMeta.GetAnnotations() 287 | 288 | // Match resources with "replicate-from" annotation 289 | if source, ok := annotations[ReplicateFromAnnotation]; ok { 290 | if err := r.resourceAddedReplicateFrom(source, obj); err != nil { 291 | logger.WithError(err).Error("could not copy from source") 292 | } 293 | 294 | return 295 | } 296 | 297 | // Match resources with "replicate-to" annotation 298 | if namespacePatterns, ok := annotations[ReplicateTo]; ok { 299 | r.ReplicateToList.Store(sourceKey, struct{}{}) 300 | 301 | namespacesFromStore := namespaceWatcher.NamespaceStore.List() 302 | namespaces := make([]v1.Namespace, len(namespacesFromStore)) 303 | for i, ns := range namespacesFromStore { 304 | namespaces[i] = *ns.(*v1.Namespace) 305 | } 306 | if err := r.replicateResourceToMatchingNamespaces(obj, namespacePatterns, namespaces); err != nil { 307 | 
logger.WithError(err).Errorf("could not replicate object to other namespaces") 308 | } 309 | } else { 310 | r.ReplicateToList.Delete(sourceKey) 311 | } 312 | 313 | // Match resources with "replicate-to-matching" annotations 314 | if namespaceSelectorString, ok := annotations[ReplicateToMatching]; ok { 315 | namespaceSelector, err := labels.Parse(namespaceSelectorString) 316 | if err != nil { 317 | r.ReplicateToMatchingList.Delete(sourceKey) 318 | logger.WithError(err).Error("failed to parse label selector") 319 | 320 | return 321 | } 322 | 323 | r.ReplicateToMatchingList.Store(sourceKey, namespaceSelector) 324 | 325 | if err := r.replicateResourceToMatchingNamespacesByLabel(ctx, obj, namespaceSelector); err != nil { 326 | logger.WithError(err).Error("error while replicating by label selector") 327 | } 328 | } else { 329 | r.ReplicateToMatchingList.Delete(sourceKey) 330 | } 331 | } 332 | 333 | // resourceAddedReplicateFrom replicates resources with ReplicateFromAnnotation 334 | func (r *GenericReplicator) resourceAddedReplicateFrom(sourceLocation string, target interface{}) error { 335 | cacheKey := MustGetKey(target) 336 | 337 | logger := log.WithField("kind", r.Kind).WithField("source", sourceLocation).WithField("target", cacheKey) 338 | logger.Debugf("%s %s is replicated from %s", r.Kind, cacheKey, sourceLocation) 339 | v := strings.SplitN(sourceLocation, "/", 2) 340 | 341 | if len(v) < 2 { 342 | return errors.Errorf("Invalid source location expected '/', got '%s'", sourceLocation) 343 | } 344 | 345 | if _, ok := r.DependencyMap[sourceLocation]; !ok { 346 | r.DependencyMap[sourceLocation] = make(map[string]interface{}) 347 | } 348 | 349 | r.DependencyMap[sourceLocation][cacheKey] = nil 350 | 351 | if _, ok := r.DependentMap[cacheKey]; !ok { 352 | r.DependentMap[cacheKey] = sourceLocation 353 | } 354 | 355 | sourceObject, exists, err := r.Store.GetByKey(sourceLocation) 356 | if err != nil { 357 | return errors.Wrapf(err, "Could not get source %s: %v", 
sourceLocation, err) 358 | } else if !exists { 359 | return errors.Errorf("Could not get source %s: does not exist", sourceLocation) 360 | } 361 | 362 | if err := r.UpdateFuncs.ReplicateDataFrom(sourceObject, target); err != nil { 363 | return errors.Wrapf(err, "Failed to replicate %s target %s -> %s: %v", 364 | r.Kind, MustGetKey(sourceObject), cacheKey, err, 365 | ) 366 | } 367 | 368 | return nil 369 | } 370 | 371 | // resourceAddedReplicateFrom replicates resources with ReplicateTo annotation 372 | func (r *GenericReplicator) replicateResourceToMatchingNamespaces(obj interface{}, nsPatternList string, namespaceList []v1.Namespace) error { 373 | cacheKey := MustGetKey(obj) 374 | logger := log.WithField("kind", r.Kind).WithField("source", cacheKey) 375 | 376 | logger.Infof("%s %s to be replicated to: [%s]", r.Kind, cacheKey, nsPatternList) 377 | 378 | replicateTo := r.getNamespacesToReplicate(MustGetObject(obj).GetNamespace(), nsPatternList, namespaceList) 379 | 380 | if replicated, err := r.replicateResourceToNamespaces(obj, replicateTo); err != nil { 381 | return errors.Wrapf(err, "Replicated %s to %d out of %d namespaces", 382 | cacheKey, len(replicated), len(replicateTo), 383 | ) 384 | } 385 | 386 | return nil 387 | } 388 | 389 | func (r *GenericReplicator) replicateResourceToMatchingNamespacesByLabel(ctx context.Context, obj interface{}, selector labels.Selector) error { 390 | cacheKey := MustGetKey(obj) 391 | 392 | namespaces, err := r.Client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) 393 | if err != nil { 394 | return errors.Wrap(err, "error while listing namespaces by selector") 395 | } 396 | 397 | if replicated, err := r.replicateResourceToNamespaces(obj, namespaces.Items); err != nil { 398 | return errors.Wrapf(err, "Replicated %s to %d out of %d namespaces", 399 | cacheKey, len(replicated), len(namespaces.Items), 400 | ) 401 | } 402 | 403 | return nil 404 | } 405 | 406 | // getNamespacesToReplicate will check 
the provided filters and create a list of namespace into with to replicate the 407 | // given object. 408 | func (r *GenericReplicator) getNamespacesToReplicate(myNs string, patterns string, namespaces []v1.Namespace) []v1.Namespace { 409 | 410 | replicateTo := make([]v1.Namespace, 0) 411 | for _, namespace := range namespaces { 412 | for _, ns := range StringToPatternList(patterns) { 413 | if matched := ns.MatchString(namespace.Name); matched { 414 | if namespace.Name == myNs { 415 | // Don't replicate upon itself 416 | continue 417 | } 418 | replicateTo = append(replicateTo, namespace) 419 | break 420 | 421 | } 422 | } 423 | } 424 | return replicateTo 425 | } 426 | 427 | // replicateResourceToNamespaces will replicate the given object into target namespaces. It will return a list of 428 | // Namespaces it was successful in replicating into 429 | func (r *GenericReplicator) replicateResourceToNamespaces(obj interface{}, targets []v1.Namespace) (replicatedTo []v1.Namespace, err error) { 430 | cacheKey := MustGetKey(obj) 431 | 432 | for _, namespace := range targets { 433 | if r.NamespaceFilter != nil && r.NamespaceFilter.ShouldExclude(namespace.Name) { 434 | log.WithFields(log.Fields{ 435 | "kind": r.Kind, 436 | "namespace": namespace.Name, 437 | }).Info("Skipping excluded namespace") 438 | continue 439 | } 440 | 441 | if innerErr := r.UpdateFuncs.ReplicateObjectTo(obj, &namespace); innerErr != nil { 442 | err = multierror.Append(err, errors.Wrapf(innerErr, "Failed to replicate %s %s -> %s: %v", 443 | r.Kind, cacheKey, namespace.Name, innerErr, 444 | )) 445 | } else { 446 | replicatedTo = append(replicatedTo, namespace) 447 | logger := log.WithField("source", cacheKey) 448 | logger.Infof("Replicated %s to: %v", cacheKey, namespace.Name) 449 | } 450 | } 451 | 452 | return 453 | } 454 | 455 | func (r *GenericReplicator) updateDependents(obj interface{}, dependents map[string]interface{}) error { 456 | cacheKey := MustGetKey(obj) 457 | logger := log.WithField("kind", 
r.Kind).WithField("source", cacheKey) 458 | 459 | for dependentKey := range dependents { 460 | logger.Infof("updating dependent %s %s -> %s", r.Kind, cacheKey, dependentKey) 461 | 462 | targetObject, exists, err := r.Store.GetByKey(dependentKey) 463 | if err != nil { 464 | logger.Debugf("could not get dependent %s %s: %s", r.Kind, dependentKey, err) 465 | continue 466 | } else if !exists { 467 | logger.Debugf("could not get dependent %s %s: does not exist", r.Kind, dependentKey) 468 | continue 469 | } 470 | 471 | if err := r.UpdateFuncs.ReplicateDataFrom(obj, targetObject); err != nil { 472 | return errors.WithStack(err) 473 | } 474 | } 475 | 476 | return nil 477 | } 478 | 479 | // ObjectFromStore gets object from store cache 480 | func (r *GenericReplicator) ObjectFromStore(key string) (interface{}, error) { 481 | obj, exists, err := r.Store.GetByKey(key) 482 | if err != nil { 483 | return nil, errors.Errorf("could not get %s %s: %s", r.Kind, key, err) 484 | } 485 | 486 | if !exists { 487 | return nil, errors.Errorf("could not get %s %s: does not exist", r.Kind, key) 488 | } 489 | 490 | return obj, nil 491 | } 492 | 493 | // ResourceDeleted watches for the deletion of resources 494 | func (r *GenericReplicator) ResourceDeleted(source interface{}) { 495 | sourceKey := MustGetKey(source) 496 | logger := log.WithField("kind", r.Kind).WithField("source", sourceKey) 497 | logger.Debugf("Deleting %s %s", r.Kind, sourceKey) 498 | 499 | r.ResourceDeletedReplicateTo(source) 500 | r.ResourceDeletedReplicateFrom(source) 501 | 502 | r.ReplicateToList.Delete(sourceKey) 503 | 504 | } 505 | 506 | func (r *GenericReplicator) ResourceDeletedReplicateTo(source interface{}) { 507 | sourceKey := MustGetKey(source) 508 | logger := log.WithField("kind", r.Kind).WithField("source", sourceKey) 509 | objMeta := MustGetObject(source) 510 | namespaceList, replicateTo := objMeta.GetAnnotations()[ReplicateTo] 511 | if replicateTo { 512 | filters := strings.Split(namespaceList, ",") 513 | 
list, err := r.Client.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{}) 514 | if err != nil { 515 | err = errors.Wrapf(err, "Failed to list namespaces: %v", err) 516 | logger.WithError(err).Errorf("Could not get namespaces: %+v", err) 517 | } else { 518 | r.DeleteResources(source, list, filters) 519 | } 520 | } 521 | 522 | // delete replicated resources in namespaces that match labels 523 | namespaceSelectorString, replicateToMatching := objMeta.GetAnnotations()[ReplicateToMatching] 524 | if replicateToMatching { 525 | namespaceSelector, err := labels.Parse(namespaceSelectorString) 526 | if err != nil { 527 | err = errors.Wrapf(err, "Failed parse namespace selector: %v", err) 528 | logger.WithError(err).Errorf("Could not get namespaces: %+v", err) 529 | } else { 530 | var namespaces *v1.NamespaceList 531 | namespaces, err = r.Client.CoreV1().Namespaces().List(context.TODO(), metav1.ListOptions{LabelSelector: namespaceSelector.String()}) 532 | if err != nil { 533 | err = errors.Wrapf(err, "Failed to list namespaces: %v", err) 534 | logger.WithError(err).Errorf("Could not get namespaces: %+v", err) 535 | } else { 536 | r.DeleteResourceInNamespaces(source, namespaces) 537 | } 538 | } 539 | } 540 | } 541 | 542 | func (r *GenericReplicator) DeleteResources(source interface{}, list *v1.NamespaceList, filters []string) { 543 | for _, namespace := range list.Items { 544 | for _, ns := range filters { 545 | ns = strings.TrimSpace(ns) 546 | if matched, _ := regexp.MatchString(ns, namespace.Name); matched { 547 | r.DeleteResource(namespace, source) 548 | } 549 | } 550 | } 551 | } 552 | 553 | // DeleteResourceInNamespaces deletes resources in a list of namespaces acquired by evaluating namespace labels 554 | func (r *GenericReplicator) DeleteResourceInNamespaces(source interface{}, list *v1.NamespaceList) { 555 | for _, namespace := range list.Items { 556 | r.DeleteResource(namespace, source) 557 | } 558 | } 559 | 560 | func (r *GenericReplicator) 
DeleteResource(namespace v1.Namespace, source interface{}) { 561 | sourceKey := MustGetKey(source) 562 | 563 | logger := log.WithField("kind", r.Kind).WithField("source", sourceKey) 564 | objMeta := MustGetObject(source) 565 | 566 | if namespace.Name == objMeta.GetNamespace() { 567 | // Don't work upon itself 568 | return 569 | } 570 | targetLocation := fmt.Sprintf("%s/%s", namespace.Name, objMeta.GetName()) 571 | targetResource, exists, err := r.Store.GetByKey(targetLocation) 572 | if err != nil { 573 | logger.WithError(err).Errorf("Could not get objectMeta %s: %+v", targetLocation, err) 574 | return 575 | } 576 | if !exists { 577 | return 578 | } 579 | if err := r.UpdateFuncs.DeleteReplicatedResource(targetResource); err != nil { 580 | logger.WithError(err).Errorf("Could not delete resource %s: %+v", targetLocation, err) 581 | } 582 | } 583 | 584 | func (r *GenericReplicator) ResourceDeletedReplicateFrom(source interface{}) { 585 | sourceKey := MustGetKey(source) 586 | 587 | logger := log.WithField("kind", r.Kind).WithField("source", sourceKey) 588 | replicas, ok := r.DependencyMap[sourceKey] 589 | if !ok { 590 | logger.Debugf("%s %s has no dependents and can be deleted without issues", r.Kind, sourceKey) 591 | return 592 | } 593 | 594 | for dependentKey := range replicas { 595 | target, err := r.ObjectFromStore(dependentKey) 596 | if err != nil { 597 | logger.WithError(err).Warnf("could not load dependent %s %s: %v", r.Kind, dependentKey, err) 598 | continue 599 | } 600 | s, err := r.UpdateFuncs.PatchDeleteDependent(sourceKey, target) 601 | if err != nil { 602 | logger.WithError(err).Warnf("could not patch dependent %s %s: %v", r.Kind, dependentKey, err) 603 | continue 604 | } 605 | if err := r.Store.Update(s); err != nil { 606 | logger.WithError(err).Errorf("Error updating store for %s %s: %v", r.Kind, MustGetKey(s), err) 607 | } 608 | } 609 | } 610 | -------------------------------------------------------------------------------- 
/replicate/secret/secrets_test.go: -------------------------------------------------------------------------------- 1 | package secret 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "fmt" 7 | "k8s.io/client-go/tools/clientcmd" 8 | "os" 9 | "path/filepath" 10 | "reflect" 11 | "strings" 12 | "sync" 13 | "testing" 14 | "time" 15 | 16 | "github.com/mittwald/kubernetes-replicator/replicate/common" 17 | pkgerrors "github.com/pkg/errors" 18 | log "github.com/sirupsen/logrus" 19 | "github.com/stretchr/testify/require" 20 | corev1 "k8s.io/api/core/v1" 21 | "k8s.io/apimachinery/pkg/api/errors" 22 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | "k8s.io/apimachinery/pkg/types" 24 | "k8s.io/client-go/informers" 25 | "k8s.io/client-go/kubernetes" 26 | "k8s.io/client-go/tools/cache" 27 | ) 28 | 29 | func namespacePrefix() string { 30 | // Mon Jan 2 15:04:05 -0700 MST 2006 31 | return "test-repl-" + time.Now().Format("060102150405") + "-" 32 | } 33 | 34 | type EventHandlerFuncs struct { 35 | AddFunc func(wg *sync.WaitGroup, obj any) 36 | UpdateFunc func(wg *sync.WaitGroup, oldObj, newObj any) 37 | DeleteFunc func(wg *sync.WaitGroup, obj any) 38 | } 39 | 40 | type PlainFormatter struct { 41 | } 42 | 43 | func (pf *PlainFormatter) Format(entry *log.Entry) ([]byte, error) { 44 | var b *bytes.Buffer 45 | if entry.Buffer != nil { 46 | b = entry.Buffer 47 | } else { 48 | b = &bytes.Buffer{} 49 | } 50 | 51 | b.WriteString(entry.Time.Format("15:04:05") + " ") 52 | b.WriteString(fmt.Sprintf("%-8s", strings.ToUpper(entry.Level.String()))) 53 | b.WriteString(entry.Message) 54 | 55 | if val, ok := entry.Data[log.ErrorKey]; ok { 56 | b.WriteByte('\n') 57 | b.WriteString(fmt.Sprint(val)) 58 | } 59 | 60 | b.WriteByte('\n') 61 | return b.Bytes(), nil 62 | } 63 | 64 | func setupRealClientSet(t *testing.T) *kubernetes.Clientset { 65 | kubeconfig := os.Getenv("KUBECONFIG") 66 | //is KUBECONFIG is not specified try to use the local KUBECONFIG or the in cluster config 67 | if len(kubeconfig) == 0 
{ 68 | if home := homeDir(); home != "" && home != "/root" { 69 | kubeconfig = filepath.Join(home, ".kube", "config") 70 | } 71 | } 72 | 73 | config, err := clientcmd.BuildConfigFromFlags("", kubeconfig) 74 | require.NoError(t, err) 75 | 76 | return kubernetes.NewForConfigOrDie(config) 77 | } 78 | 79 | func TestSecretReplicator(t *testing.T) { 80 | 81 | log.SetLevel(log.TraceLevel) 82 | log.SetFormatter(&PlainFormatter{}) 83 | 84 | client := setupRealClientSet(t) 85 | filter := common.NewNamespaceFilter([]string{}) 86 | 87 | repl := NewReplicator(client, 60*time.Second, false, false, filter) 88 | go repl.Run() 89 | 90 | time.Sleep(200 * time.Millisecond) 91 | 92 | prefix := namespacePrefix() 93 | 94 | ns := corev1.Namespace{ 95 | ObjectMeta: metav1.ObjectMeta{ 96 | Name: prefix + "test", 97 | }, 98 | } 99 | 100 | nsData, err := client.CoreV1().Namespaces().Create(context.TODO(), &ns, metav1.CreateOptions{}) 101 | require.NoError(t, err) 102 | 103 | ns2 := corev1.Namespace{ 104 | ObjectMeta: metav1.ObjectMeta{ 105 | Name: prefix + "test2", 106 | Labels: map[string]string{ 107 | "foo": "bar", 108 | }}, 109 | } 110 | _, err = client.CoreV1().Namespaces().Create(context.TODO(), &ns2, metav1.CreateOptions{}) 111 | require.NoError(t, err) 112 | 113 | defer func() { 114 | _ = client.CoreV1().Namespaces().Delete(context.TODO(), ns.Name, metav1.DeleteOptions{}) 115 | _ = client.CoreV1().Namespaces().Delete(context.TODO(), ns2.Name, metav1.DeleteOptions{}) 116 | }() 117 | 118 | secrets := client.CoreV1().Secrets(prefix + "test") 119 | 120 | const MaxWaitTime = 1000 * time.Millisecond 121 | t.Run("replicates from existing secret", func(t *testing.T) { 122 | source := corev1.Secret{ 123 | ObjectMeta: metav1.ObjectMeta{ 124 | Name: "source", 125 | Namespace: ns.Name, 126 | Annotations: map[string]string{ 127 | common.ReplicationAllowed: "true", 128 | common.ReplicationAllowedNamespaces: ns.Name, 129 | }, 130 | }, 131 | Type: corev1.SecretTypeOpaque, 132 | Data: 
map[string][]byte{ 133 | "foo": []byte("Hello World"), 134 | }, 135 | } 136 | 137 | target := corev1.Secret{ 138 | ObjectMeta: metav1.ObjectMeta{ 139 | Name: "target", 140 | Namespace: ns.Name, 141 | Annotations: map[string]string{ 142 | common.ReplicateFromAnnotation: common.MustGetKey(&source), 143 | }, 144 | }, 145 | Type: corev1.SecretTypeOpaque, 146 | } 147 | 148 | wg, stop := waitForSecrets(client, 3, EventHandlerFuncs{ 149 | AddFunc: func(wg *sync.WaitGroup, obj any) { 150 | secret := obj.(*corev1.Secret) 151 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 152 | log.Debugf("AddFunc %+v", obj) 153 | wg.Done() 154 | } else if secret.Namespace == target.Namespace && secret.Name == target.Name { 155 | log.Debugf("AddFunc %+v", obj) 156 | wg.Done() 157 | } 158 | }, 159 | UpdateFunc: func(wg *sync.WaitGroup, oldObj, newObj any) { 160 | secret := oldObj.(*corev1.Secret) 161 | if secret.Namespace == target.Namespace && secret.Name == target.Name { 162 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 163 | wg.Done() 164 | } 165 | }, 166 | }) 167 | 168 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 169 | require.NoError(t, err) 170 | 171 | _, err = secrets.Create(context.TODO(), &target, metav1.CreateOptions{}) 172 | require.NoError(t, err) 173 | 174 | waitWithTimeout(wg, MaxWaitTime) 175 | close(stop) 176 | 177 | updTarget, err := secrets.Get(context.TODO(), target.Name, metav1.GetOptions{}) 178 | require.NoError(t, err) 179 | require.Equal(t, []byte("Hello World"), updTarget.Data["foo"]) 180 | }) 181 | 182 | t.Run("replicates honours ReplicationAllowed tag", func(t *testing.T) { 183 | source := corev1.Secret{ 184 | ObjectMeta: metav1.ObjectMeta{ 185 | Name: "source-repl-allowed", 186 | Namespace: ns.Name, 187 | Annotations: map[string]string{ 188 | common.ReplicationAllowed: "false", 189 | common.ReplicationAllowedNamespaces: ns2.Name, 190 | }, 191 | }, 192 | Type: corev1.SecretTypeOpaque, 193 | Data: 
map[string][]byte{ 194 | "foo": []byte("Hello World"), 195 | }, 196 | } 197 | 198 | target := corev1.Secret{ 199 | ObjectMeta: metav1.ObjectMeta{ 200 | Name: "target-repl-allowed", 201 | Namespace: ns2.Name, 202 | Annotations: map[string]string{ 203 | common.ReplicateFromAnnotation: common.MustGetKey(&source), 204 | }, 205 | }, 206 | Type: corev1.SecretTypeOpaque, 207 | } 208 | 209 | wg, stop := waitForSecrets(client, 2, EventHandlerFuncs{ 210 | AddFunc: func(wg *sync.WaitGroup, obj any) { 211 | secret := obj.(*corev1.Secret) 212 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 213 | log.Debugf("AddFunc %+v", obj) 214 | wg.Done() 215 | } else if secret.Namespace == target.Namespace && secret.Name == target.Name { 216 | log.Debugf("AddFunc %+v", obj) 217 | wg.Done() 218 | } 219 | }, 220 | }) 221 | 222 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 223 | require.NoError(t, err) 224 | 225 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 226 | _, err = secrets2.Create(context.TODO(), &target, metav1.CreateOptions{}) 227 | require.NoError(t, err) 228 | 229 | waitWithTimeout(wg, MaxWaitTime) 230 | close(stop) 231 | 232 | updTarget, err := secrets2.Get(context.TODO(), target.Name, metav1.GetOptions{}) 233 | require.NoError(t, err) 234 | require.NotEqual(t, []byte("Hello World"), updTarget.Data["foo"]) 235 | }) 236 | 237 | t.Run("replicates keeps originally present values", func(t *testing.T) { 238 | source := corev1.Secret{ 239 | ObjectMeta: metav1.ObjectMeta{ 240 | Name: "source3", 241 | Namespace: ns.Name, 242 | Annotations: map[string]string{ 243 | common.ReplicationAllowed: "true", 244 | common.ReplicationAllowedNamespaces: ns.Name, 245 | }, 246 | }, 247 | Type: corev1.SecretTypeOpaque, 248 | Data: map[string][]byte{ 249 | "foo": []byte("Hello World"), 250 | }, 251 | } 252 | 253 | target := corev1.Secret{ 254 | ObjectMeta: metav1.ObjectMeta{ 255 | Name: "target3", 256 | Namespace: ns.Name, 257 | Annotations: 
map[string]string{ 258 | common.ReplicateFromAnnotation: common.MustGetKey(&source), 259 | }, 260 | }, 261 | Type: corev1.SecretTypeOpaque, 262 | Data: map[string][]byte{ 263 | "bar": []byte("Hello Bar"), 264 | }, 265 | } 266 | 267 | wg, stop := waitForSecrets(client, 3, EventHandlerFuncs{ 268 | AddFunc: func(wg *sync.WaitGroup, obj any) { 269 | secret := obj.(*corev1.Secret) 270 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 271 | log.Debugf("AddFunc %+v", obj) 272 | wg.Done() 273 | } else if secret.Namespace == target.Namespace && secret.Name == target.Name { 274 | log.Debugf("AddFunc %+v", obj) 275 | wg.Done() 276 | } 277 | }, 278 | UpdateFunc: func(wg *sync.WaitGroup, oldObj, newObj any) { 279 | secret := oldObj.(*corev1.Secret) 280 | if secret.Namespace == target.Namespace && secret.Name == target.Name { 281 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 282 | wg.Done() 283 | } 284 | }, 285 | }) 286 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 287 | require.NoError(t, err) 288 | 289 | _, err = secrets.Create(context.TODO(), &target, metav1.CreateOptions{}) 290 | require.NoError(t, err) 291 | 292 | waitWithTimeout(wg, MaxWaitTime) 293 | close(stop) 294 | 295 | updTarget, err := secrets.Get(context.TODO(), target.Name, metav1.GetOptions{}) 296 | require.NoError(t, err) 297 | require.Equal(t, []byte("Hello World"), updTarget.Data["foo"]) 298 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 299 | }) 300 | 301 | t.Run("replication removes keys removed from source secret", func(t *testing.T) { 302 | source := corev1.Secret{ 303 | ObjectMeta: metav1.ObjectMeta{ 304 | Name: "source2", 305 | Namespace: ns.Name, 306 | Annotations: map[string]string{ 307 | common.ReplicationAllowed: "true", 308 | common.ReplicationAllowedNamespaces: ns.Name, 309 | }, 310 | }, 311 | Type: corev1.SecretTypeOpaque, 312 | Data: map[string][]byte{ 313 | "foo": []byte("Hello Foo"), 314 | "bar": []byte("Hello Bar"), 
315 | }, 316 | } 317 | 318 | target := corev1.Secret{ 319 | ObjectMeta: metav1.ObjectMeta{ 320 | Name: "target2", 321 | Namespace: ns.Name, 322 | Annotations: map[string]string{ 323 | common.ReplicateFromAnnotation: common.MustGetKey(&source), 324 | }, 325 | }, 326 | Type: corev1.SecretTypeOpaque, 327 | } 328 | 329 | wg, stop := waitForSecrets(client, 3, EventHandlerFuncs{ 330 | AddFunc: func(wg *sync.WaitGroup, obj any) { 331 | secret := obj.(*corev1.Secret) 332 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 333 | log.Debugf("AddFunc %+v", obj) 334 | wg.Done() 335 | } else if secret.Namespace == target.Namespace && secret.Name == target.Name { 336 | log.Debugf("AddFunc %+v", obj) 337 | wg.Done() 338 | } 339 | }, 340 | UpdateFunc: func(wg *sync.WaitGroup, oldObj, newObj any) { 341 | secret := oldObj.(*corev1.Secret) 342 | if secret.Namespace == target.Namespace && secret.Name == target.Name { 343 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 344 | wg.Done() 345 | } 346 | }, 347 | }) 348 | 349 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 350 | require.NoError(t, err) 351 | 352 | _, err = secrets.Create(context.TODO(), &target, metav1.CreateOptions{}) 353 | require.NoError(t, err) 354 | 355 | waitWithTimeout(wg, MaxWaitTime) 356 | close(stop) 357 | 358 | updTarget, err := secrets.Get(context.TODO(), target.Name, metav1.GetOptions{}) 359 | require.NoError(t, err) 360 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 361 | 362 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 363 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 364 | secret := oldObj.(*corev1.Secret) 365 | if secret.Namespace == target.Namespace && secret.Name == target.Name { 366 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 367 | wg.Done() 368 | } 369 | }, 370 | }) 371 | 372 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", 
"path": "/data/foo"}]`), metav1.PatchOptions{}) 373 | require.NoError(t, err) 374 | 375 | waitWithTimeout(wg, MaxWaitTime) 376 | close(stop) 377 | 378 | updTarget, err = secrets.Get(context.TODO(), target.Name, metav1.GetOptions{}) 379 | require.NoError(t, err) 380 | 381 | _, hasFoo := updTarget.Data["foo"] 382 | require.False(t, hasFoo) 383 | }) 384 | 385 | t.Run("replication does not remove original values", func(t *testing.T) { 386 | source := corev1.Secret{ 387 | ObjectMeta: metav1.ObjectMeta{ 388 | Name: "source4", 389 | Namespace: ns.Name, 390 | Annotations: map[string]string{ 391 | common.ReplicationAllowed: "true", 392 | common.ReplicationAllowedNamespaces: ns.Name, 393 | }, 394 | }, 395 | Type: corev1.SecretTypeOpaque, 396 | Data: map[string][]byte{ 397 | "foo": []byte("Hello Foo"), 398 | "bar": []byte("Hello Bar"), 399 | }, 400 | } 401 | 402 | target := corev1.Secret{ 403 | ObjectMeta: metav1.ObjectMeta{ 404 | Name: "target4", 405 | Namespace: ns.Name, 406 | Annotations: map[string]string{ 407 | common.ReplicateFromAnnotation: common.MustGetKey(&source), 408 | }, 409 | }, 410 | Type: corev1.SecretTypeOpaque, 411 | Data: map[string][]byte{ 412 | "bar": []byte("Hello Bar"), 413 | }, 414 | } 415 | 416 | wg, stop := waitForSecrets(client, 3, EventHandlerFuncs{ 417 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 418 | secret := obj.(*corev1.Secret) 419 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 420 | log.Debugf("AddFunc %+v", obj) 421 | wg.Done() 422 | } else if secret.Namespace == target.Namespace && secret.Name == target.Name { 423 | log.Debugf("AddFunc %+v", obj) 424 | wg.Done() 425 | } 426 | }, 427 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 428 | secret := oldObj.(*corev1.Secret) 429 | if secret.Namespace == target.Namespace && secret.Name == target.Name { 430 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 431 | wg.Done() 432 | } 433 | }, 434 | }) 435 | 436 | _, err := 
secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 437 | require.NoError(t, err) 438 | 439 | _, err = secrets.Create(context.TODO(), &target, metav1.CreateOptions{}) 440 | require.NoError(t, err) 441 | 442 | waitWithTimeout(wg, MaxWaitTime) 443 | close(stop) 444 | 445 | updTarget, err := secrets.Get(context.TODO(), target.Name, metav1.GetOptions{}) 446 | require.NoError(t, err) 447 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 448 | 449 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 450 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 451 | secret := oldObj.(*corev1.Secret) 452 | if secret.Namespace == target.Namespace && secret.Name == target.Name { 453 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 454 | wg.Done() 455 | } 456 | }, 457 | }) 458 | 459 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 460 | require.NoError(t, err) 461 | 462 | waitWithTimeout(wg, MaxWaitTime) 463 | close(stop) 464 | 465 | updTarget, err = secrets.Get(context.TODO(), target.Name, metav1.GetOptions{}) 466 | require.NoError(t, err) 467 | 468 | _, hasFoo := updTarget.Data["foo"] 469 | require.False(t, hasFoo) 470 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 471 | }) 472 | 473 | t.Run("replication is pushed to other namespaces", func(t *testing.T) { 474 | sourceLabels := map[string]string{ 475 | "foo": "bar", 476 | "hello": "world", 477 | } 478 | source := corev1.Secret{ 479 | ObjectMeta: metav1.ObjectMeta{ 480 | Name: "source-pushed-to-other-ns", 481 | Namespace: ns.Name, 482 | Annotations: map[string]string{ 483 | common.ReplicateTo: prefix + "test2", 484 | }, 485 | Labels: sourceLabels, 486 | }, 487 | Type: corev1.SecretTypeOpaque, 488 | Data: map[string][]byte{ 489 | "foo": []byte("Hello Foo"), 490 | "bar": []byte("Hello Bar"), 491 | }, 492 | } 493 | 494 | wg, stop := waitForSecrets(client, 
2, EventHandlerFuncs{ 495 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 496 | secret := obj.(*corev1.Secret) 497 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 498 | log.Debugf("AddFunc %+v", obj) 499 | wg.Done() 500 | } else if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 501 | log.Debugf("AddFunc %+v", obj) 502 | wg.Done() 503 | } 504 | }, 505 | }) 506 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 507 | require.NoError(t, err) 508 | 509 | waitWithTimeout(wg, MaxWaitTime) 510 | close(stop) 511 | 512 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 513 | updTarget, err := secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 514 | 515 | require.NoError(t, err) 516 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 517 | require.True(t, reflect.DeepEqual(sourceLabels, updTarget.Labels)) 518 | 519 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 520 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 521 | secret := oldObj.(*corev1.Secret) 522 | if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 523 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 524 | wg.Done() 525 | } 526 | }, 527 | }) 528 | 529 | fmt.Printf("removing key foo from source secret\n") 530 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 531 | fmt.Printf("source secret patched\n") 532 | require.NoError(t, err) 533 | 534 | waitWithTimeout(wg, MaxWaitTime) 535 | close(stop) 536 | 537 | updTarget, err = secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 538 | require.NoError(t, err) 539 | 540 | _, hasFoo := updTarget.Data["foo"] 541 | require.False(t, hasFoo) 542 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 543 | }) 544 | 545 | t.Run("replication is pushed to other namespaces without ownerReferences", 
func(t *testing.T) { 546 | sourceLabels := map[string]string{ 547 | "foo": "bar", 548 | "hello": "world", 549 | } 550 | source := corev1.Secret{ 551 | ObjectMeta: metav1.ObjectMeta{ 552 | Name: "source-pushed-to-other-without-owner-references", 553 | Namespace: ns.Name, 554 | Annotations: map[string]string{ 555 | common.ReplicateTo: prefix + "test2", 556 | }, 557 | Labels: sourceLabels, 558 | OwnerReferences: []metav1.OwnerReference{{ 559 | APIVersion: "v1", 560 | Kind: "Namespace", 561 | Name: nsData.Name, 562 | UID: nsData.UID, 563 | }}, 564 | }, 565 | Type: corev1.SecretTypeOpaque, 566 | Data: map[string][]byte{ 567 | "foo": []byte("Hello Foo"), 568 | "bar": []byte("Hello Bar"), 569 | }, 570 | } 571 | 572 | wg, stop := waitForSecrets(client, 2, EventHandlerFuncs{ 573 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 574 | secret := obj.(*corev1.Secret) 575 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 576 | log.Debugf("AddFunc %+v", obj) 577 | wg.Done() 578 | } else if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 579 | log.Debugf("AddFunc %+v", obj) 580 | wg.Done() 581 | } 582 | }, 583 | }) 584 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 585 | require.NoError(t, err) 586 | 587 | waitWithTimeout(wg, MaxWaitTime) 588 | close(stop) 589 | 590 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 591 | updTarget, err := secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 592 | 593 | require.NoError(t, err) 594 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 595 | require.True(t, reflect.DeepEqual(sourceLabels, updTarget.Labels)) 596 | 597 | require.Equal(t, []metav1.OwnerReference(nil), updTarget.OwnerReferences) 598 | require.NotEqual(t, source.OwnerReferences, updTarget.OwnerReferences) 599 | 600 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 601 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 602 | secret := 
oldObj.(*corev1.Secret) 603 | if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 604 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 605 | wg.Done() 606 | } 607 | }, 608 | }) 609 | 610 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 611 | require.NoError(t, err) 612 | 613 | waitWithTimeout(wg, MaxWaitTime) 614 | close(stop) 615 | 616 | updTarget, err = secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 617 | require.NoError(t, err) 618 | 619 | _, hasFoo := updTarget.Data["foo"] 620 | require.False(t, hasFoo) 621 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 622 | }) 623 | 624 | t.Run("replication is pushed to other namespaces with ownerReferences", func(t *testing.T) { 625 | sourceLabels := map[string]string{ 626 | "foo": "bar", 627 | "hello": "world", 628 | } 629 | source := corev1.Secret{ 630 | ObjectMeta: metav1.ObjectMeta{ 631 | Name: "source-pushed-to-other-with-owner-references", 632 | Namespace: ns.Name, 633 | Annotations: map[string]string{ 634 | common.ReplicateTo: prefix + "test2", 635 | common.KeepOwnerReferences: "true", 636 | }, 637 | Labels: sourceLabels, 638 | OwnerReferences: []metav1.OwnerReference{{ 639 | APIVersion: "v1", 640 | Kind: "Namespace", 641 | Name: nsData.Name, 642 | UID: nsData.UID, 643 | }}, 644 | }, 645 | Type: corev1.SecretTypeOpaque, 646 | Data: map[string][]byte{ 647 | "foo": []byte("Hello Foo"), 648 | "bar": []byte("Hello Bar"), 649 | }, 650 | } 651 | 652 | wg, stop := waitForSecrets(client, 2, EventHandlerFuncs{ 653 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 654 | secret := obj.(*corev1.Secret) 655 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 656 | log.Debugf("AddFunc %+v", obj) 657 | wg.Done() 658 | } else if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 659 | log.Debugf("AddFunc %+v", obj) 660 | wg.Done() 661 | } 
662 | }, 663 | }) 664 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 665 | require.NoError(t, err) 666 | 667 | waitWithTimeout(wg, MaxWaitTime) 668 | close(stop) 669 | 670 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 671 | updTarget, err := secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 672 | 673 | require.NoError(t, err) 674 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 675 | require.True(t, reflect.DeepEqual(sourceLabels, updTarget.Labels)) 676 | 677 | require.Equal(t, source.OwnerReferences, updTarget.OwnerReferences) 678 | 679 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 680 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 681 | secret := oldObj.(*corev1.Secret) 682 | if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 683 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 684 | wg.Done() 685 | } 686 | }, 687 | }) 688 | 689 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 690 | require.NoError(t, err) 691 | 692 | waitWithTimeout(wg, MaxWaitTime) 693 | close(stop) 694 | 695 | updTarget, err = secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 696 | require.NoError(t, err) 697 | 698 | _, hasFoo := updTarget.Data["foo"] 699 | require.False(t, hasFoo) 700 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 701 | }) 702 | 703 | t.Run("replication is pushed to other namespaces and strip labels", func(t *testing.T) { 704 | sourceLabels := map[string]string{ 705 | "foo": "bar", 706 | "hello": "world", 707 | } 708 | source := corev1.Secret{ 709 | ObjectMeta: metav1.ObjectMeta{ 710 | Name: "source-pushed-to-other-with-strip-labels", 711 | Namespace: ns.Name, 712 | Annotations: map[string]string{ 713 | common.ReplicateTo: prefix + "test2", 714 | common.StripLabels: "true", 715 | }, 716 | Labels: sourceLabels, 717 | 
OwnerReferences: []metav1.OwnerReference{{ 718 | APIVersion: "v1", 719 | Kind: "Namespace", 720 | Name: nsData.Name, 721 | UID: nsData.UID, 722 | }}, 723 | }, 724 | Type: corev1.SecretTypeOpaque, 725 | Data: map[string][]byte{ 726 | "foo": []byte("Hello Foo"), 727 | "bar": []byte("Hello Bar"), 728 | }, 729 | } 730 | 731 | wg, stop := waitForSecrets(client, 2, EventHandlerFuncs{ 732 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 733 | secret := obj.(*corev1.Secret) 734 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 735 | log.Debugf("AddFunc %+v", obj) 736 | wg.Done() 737 | } else if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 738 | log.Debugf("AddFunc %+v", obj) 739 | wg.Done() 740 | } 741 | }, 742 | }) 743 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 744 | require.NoError(t, err) 745 | 746 | waitWithTimeout(wg, MaxWaitTime) 747 | close(stop) 748 | 749 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 750 | updTarget, err := secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 751 | 752 | require.NoError(t, err) 753 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 754 | require.False(t, reflect.DeepEqual(sourceLabels, updTarget.Labels)) 755 | 756 | require.Equal(t, map[string]string(nil), updTarget.Labels) 757 | }) 758 | 759 | t.Run("replication is pushed to other namespaces by label selector", func(t *testing.T) { 760 | source := corev1.Secret{ 761 | ObjectMeta: metav1.ObjectMeta{ 762 | Name: "source-pushed-to-other-ns-by-label", 763 | Namespace: ns.Name, 764 | Annotations: map[string]string{ 765 | common.ReplicateToMatching: "foo", 766 | }, 767 | }, 768 | Type: corev1.SecretTypeOpaque, 769 | Data: map[string][]byte{ 770 | "foo": []byte("Hello Foo"), 771 | "bar": []byte("Hello Bar"), 772 | }, 773 | } 774 | 775 | wg, stop := waitForSecrets(client, 2, EventHandlerFuncs{ 776 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 777 | secret := 
obj.(*corev1.Secret) 778 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 779 | log.Debugf("AddFunc %+v", obj) 780 | wg.Done() 781 | } else if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 782 | log.Debugf("AddFunc %+v", obj) 783 | wg.Done() 784 | } 785 | }, 786 | }) 787 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 788 | require.NoError(t, err) 789 | 790 | waitWithTimeout(wg, MaxWaitTime) 791 | close(stop) 792 | 793 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 794 | updTarget, err := secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 795 | 796 | require.NoError(t, err) 797 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 798 | 799 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 800 | UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 801 | secret := oldObj.(*corev1.Secret) 802 | if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 803 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 804 | wg.Done() 805 | } 806 | }, 807 | }) 808 | 809 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 810 | require.NoError(t, err) 811 | 812 | waitWithTimeout(wg, MaxWaitTime) 813 | close(stop) 814 | 815 | updTarget, err = secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 816 | require.NoError(t, err) 817 | 818 | _, hasFoo := updTarget.Data["foo"] 819 | require.False(t, hasFoo) 820 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 821 | }) 822 | 823 | t.Run("replication updates existing secrets", func(t *testing.T) { 824 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 825 | 826 | target := corev1.Secret{ 827 | ObjectMeta: metav1.ObjectMeta{ 828 | Name: "source-repl-updates-existing", 829 | Namespace: ns2.Name, 830 | }, 831 | Type: corev1.SecretTypeOpaque, 832 | Data: 
map[string][]byte{}, 833 | } 834 | 835 | _, err = secrets2.Create(context.TODO(), &target, metav1.CreateOptions{}) 836 | require.NoError(t, err) 837 | 838 | time.Sleep(100 * time.Millisecond) 839 | 840 | source := corev1.Secret{ 841 | ObjectMeta: metav1.ObjectMeta{ 842 | Name: "source-repl-updates-existing", 843 | Namespace: ns.Name, 844 | Annotations: map[string]string{ 845 | common.ReplicateTo: prefix + "test2", 846 | }, 847 | }, 848 | Type: corev1.SecretTypeOpaque, 849 | Data: map[string][]byte{ 850 | "foo": []byte("Hello Foo"), 851 | "bar": []byte("Hello Bar"), 852 | }, 853 | } 854 | 855 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 856 | require.NoError(t, err) 857 | 858 | time.Sleep(300 * time.Millisecond) 859 | 860 | updTarget, err := secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 861 | 862 | require.NoError(t, err) 863 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 864 | 865 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 866 | require.NoError(t, err) 867 | 868 | time.Sleep(300 * time.Millisecond) 869 | 870 | updTarget, err = secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 871 | require.NoError(t, err) 872 | 873 | _, hasFoo := updTarget.Data["foo"] 874 | require.False(t, hasFoo) 875 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 876 | }) 877 | 878 | t.Run("secrets are replicated when new namespace is created", func(t *testing.T) { 879 | namespaceName := prefix + "test-repl-new-ns" 880 | source := corev1.Secret{ 881 | ObjectMeta: metav1.ObjectMeta{ 882 | Name: "source6", 883 | Namespace: ns.Name, 884 | Annotations: map[string]string{ 885 | common.ReplicateTo: namespaceName, 886 | }, 887 | }, 888 | Type: corev1.SecretTypeOpaque, 889 | Data: map[string][]byte{ 890 | "foo": []byte("Hello Foo"), 891 | "bar": []byte("Hello Bar"), 892 | }, 893 | } 894 | 895 | wg, stop := 
waitForSecrets(client, 1, EventHandlerFuncs{ 896 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 897 | secret := obj.(*corev1.Secret) 898 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 899 | log.Debugf("AddFunc %+v", obj) 900 | wg.Done() 901 | } 902 | }, 903 | }) 904 | 905 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 906 | require.NoError(t, err) 907 | 908 | waitWithTimeout(wg, MaxWaitTime) 909 | close(stop) 910 | 911 | ns3 := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespaceName}} 912 | 913 | wg, stop = waitForNamespaces(client, 1, EventHandlerFuncs{ 914 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 915 | ns := obj.(*corev1.Namespace) 916 | if ns.Name == ns3.Name { 917 | log.Debugf("AddFunc %+v", obj) 918 | wg.Done() 919 | } 920 | }, 921 | }) 922 | 923 | wg2, stop2 := waitForSecrets(client, 1, EventHandlerFuncs{ 924 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 925 | secret := obj.(*corev1.Secret) 926 | if secret.Namespace == ns3.Name && secret.Name == source.Name { 927 | log.Debugf("AddFunc %+v", obj) 928 | wg.Done() 929 | } 930 | }, 931 | }) 932 | 933 | _, err = client.CoreV1().Namespaces().Create(context.TODO(), &ns3, metav1.CreateOptions{}) 934 | require.NoError(t, err) 935 | 936 | defer func() { 937 | _ = client.CoreV1().Namespaces().Delete(context.TODO(), ns3.Name, metav1.DeleteOptions{}) 938 | }() 939 | 940 | waitWithTimeout(wg, MaxWaitTime) 941 | close(stop) 942 | 943 | waitWithTimeout(wg2, MaxWaitTime) 944 | close(stop2) 945 | 946 | secrets3 := client.CoreV1().Secrets(namespaceName) 947 | updTarget, err := secrets3.Get(context.TODO(), source.Name, metav1.GetOptions{}) 948 | require.NoError(t, err) 949 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 950 | 951 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 952 | UpdateFunc: func(wg *sync.WaitGroup, objOld interface{}, objNew interface{}) { 953 | secret := objOld.(*corev1.Secret) 954 | if 
secret.Namespace == ns3.Name && secret.Name == source.Name { 955 | log.Debugf("UpdateFunc %+v", objOld) 956 | wg.Done() 957 | } 958 | }, 959 | }) 960 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 961 | require.NoError(t, err) 962 | 963 | waitWithTimeout(wg, MaxWaitTime) 964 | close(stop) 965 | 966 | updTarget, err = secrets3.Get(context.TODO(), source.Name, metav1.GetOptions{}) 967 | require.NoError(t, err) 968 | 969 | _, hasFoo := updTarget.Data["foo"] 970 | require.False(t, hasFoo) 971 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 972 | }) 973 | 974 | t.Run("secrets are replicated when new namespace is created with label", func(t *testing.T) { 975 | namespaceName := prefix + "test-repl-new-ns-label" 976 | source := corev1.Secret{ 977 | ObjectMeta: metav1.ObjectMeta{ 978 | Name: "source6-with-label", 979 | Namespace: ns.Name, 980 | Annotations: map[string]string{ 981 | common.ReplicateToMatching: "foo=veryspecificvalue", 982 | }, 983 | }, 984 | Type: corev1.SecretTypeOpaque, 985 | Data: map[string][]byte{ 986 | "foo": []byte("Hello Foo"), 987 | "bar": []byte("Hello Bar"), 988 | }, 989 | } 990 | 991 | wg, stop := waitForSecrets(client, 1, EventHandlerFuncs{ 992 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 993 | secret := obj.(*corev1.Secret) 994 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 995 | log.Debugf("AddFunc %+v", obj) 996 | wg.Done() 997 | } 998 | }, 999 | }) 1000 | 1001 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 1002 | require.NoError(t, err) 1003 | 1004 | waitWithTimeout(wg, MaxWaitTime) 1005 | close(stop) 1006 | 1007 | ns3 := corev1.Namespace{ 1008 | ObjectMeta: metav1.ObjectMeta{ 1009 | Name: namespaceName, 1010 | Labels: map[string]string{ 1011 | "foo": "veryspecificvalue", 1012 | }, 1013 | }, 1014 | } 1015 | 1016 | wg, stop = waitForNamespaces(client, 1, 
EventHandlerFuncs{ 1017 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 1018 | ns := obj.(*corev1.Namespace) 1019 | if ns.Name == ns3.Name { 1020 | log.Debugf("AddFunc %+v", obj) 1021 | wg.Done() 1022 | } 1023 | }, 1024 | }) 1025 | 1026 | wg2, stop2 := waitForSecrets(client, 1, EventHandlerFuncs{ 1027 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 1028 | secret := obj.(*corev1.Secret) 1029 | if secret.Namespace == ns3.Name && secret.Name == source.Name { 1030 | log.Debugf("AddFunc %+v", obj) 1031 | wg.Done() 1032 | } 1033 | }, 1034 | }) 1035 | 1036 | _, err = client.CoreV1().Namespaces().Create(context.TODO(), &ns3, metav1.CreateOptions{}) 1037 | require.NoError(t, err) 1038 | 1039 | defer func() { 1040 | _ = client.CoreV1().Namespaces().Delete(context.TODO(), ns3.Name, metav1.DeleteOptions{}) 1041 | }() 1042 | 1043 | waitWithTimeout(wg, MaxWaitTime) 1044 | close(stop) 1045 | 1046 | waitWithTimeout(wg2, MaxWaitTime) 1047 | close(stop2) 1048 | 1049 | secrets3 := client.CoreV1().Secrets(namespaceName) 1050 | updTarget, err := secrets3.Get(context.TODO(), source.Name, metav1.GetOptions{}) 1051 | require.NoError(t, err) 1052 | require.Equal(t, []byte("Hello Foo"), updTarget.Data["foo"]) 1053 | 1054 | wg, stop = waitForSecrets(client, 1, EventHandlerFuncs{ 1055 | UpdateFunc: func(wg *sync.WaitGroup, objOld interface{}, objNew interface{}) { 1056 | secret := objOld.(*corev1.Secret) 1057 | if secret.Namespace == ns3.Name && secret.Name == source.Name { 1058 | log.Debugf("UpdateFunc %+v", objOld) 1059 | wg.Done() 1060 | } 1061 | }, 1062 | }) 1063 | _, err = secrets.Patch(context.TODO(), source.Name, types.JSONPatchType, []byte(`[{"op": "remove", "path": "/data/foo"}]`), metav1.PatchOptions{}) 1064 | require.NoError(t, err) 1065 | 1066 | waitWithTimeout(wg, MaxWaitTime) 1067 | close(stop) 1068 | 1069 | updTarget, err = secrets3.Get(context.TODO(), source.Name, metav1.GetOptions{}) 1070 | require.NoError(t, err) 1071 | 1072 | _, hasFoo := updTarget.Data["foo"] 
1073 | require.False(t, hasFoo) 1074 | require.Equal(t, []byte("Hello Bar"), updTarget.Data["bar"]) 1075 | }) 1076 | 1077 | t.Run("secrets updated when namespace is deleted", func(t *testing.T) { 1078 | ns4 := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: prefix + "test4"}} 1079 | 1080 | wg, stop := waitForNamespaces(client, 1, EventHandlerFuncs{ 1081 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 1082 | ns := obj.(*corev1.Namespace) 1083 | if ns.Name == ns4.Name { 1084 | log.Debugf("AddFunc %+v", obj) 1085 | wg.Done() 1086 | } 1087 | }, 1088 | }) 1089 | 1090 | _, err = client.CoreV1().Namespaces().Create(context.TODO(), &ns4, metav1.CreateOptions{}) 1091 | require.NoError(t, err) 1092 | 1093 | waitWithTimeout(wg, MaxWaitTime) 1094 | close(stop) 1095 | 1096 | source := corev1.Secret{ 1097 | ObjectMeta: metav1.ObjectMeta{ 1098 | Name: "source-ns-delete", 1099 | Namespace: ns4.Name, 1100 | Annotations: map[string]string{ 1101 | common.ReplicationAllowed: "true", 1102 | common.ReplicationAllowedNamespaces: ns.Name, 1103 | }, 1104 | }, 1105 | Type: corev1.SecretTypeOpaque, 1106 | Data: map[string][]byte{ 1107 | "foo": []byte("Hello Foo"), 1108 | "bar": []byte("Hello Bar"), 1109 | }, 1110 | } 1111 | 1112 | target := corev1.Secret{ 1113 | ObjectMeta: metav1.ObjectMeta{ 1114 | Name: "target-ns-delete", 1115 | Namespace: ns.Name, 1116 | Annotations: map[string]string{ 1117 | common.ReplicateFromAnnotation: common.MustGetKey(&source), 1118 | }, 1119 | }, 1120 | Type: corev1.SecretTypeOpaque, 1121 | } 1122 | 1123 | wg, stop = waitForSecrets(client, 3, EventHandlerFuncs{ 1124 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 1125 | secret := obj.(*corev1.Secret) 1126 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 1127 | log.Debugf("AddFunc %+v", obj) 1128 | wg.Done() 1129 | } else if secret.Namespace == target.Namespace && secret.Name == target.Name { 1130 | log.Debugf("AddFunc %+v", obj) 1131 | wg.Done() 1132 | } 1133 | }, 1134 
| UpdateFunc: func(wg *sync.WaitGroup, oldObj interface{}, newObj interface{}) { 1135 | secret := oldObj.(*corev1.Secret) 1136 | if secret.Namespace == target.Namespace && secret.Name == target.Name { 1137 | log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj) 1138 | wg.Done() 1139 | } 1140 | }, 1141 | }) 1142 | 1143 | secrets4 := client.CoreV1().Secrets(prefix + "test4") 1144 | 1145 | _, err := secrets4.Create(context.TODO(), &source, metav1.CreateOptions{}) 1146 | require.NoError(t, err) 1147 | 1148 | _, err = secrets.Create(context.TODO(), &target, metav1.CreateOptions{}) 1149 | require.NoError(t, err) 1150 | 1151 | waitWithTimeout(wg, MaxWaitTime) 1152 | close(stop) 1153 | 1154 | wg, stop = waitForNamespaces(client, 1, EventHandlerFuncs{ 1155 | DeleteFunc: func(wg *sync.WaitGroup, obj interface{}) { 1156 | ns := obj.(*corev1.Namespace) 1157 | if ns.Name == ns4.Name { 1158 | log.Debugf("DeleteFunc %+v", obj) 1159 | wg.Done() 1160 | } 1161 | }, 1162 | }) 1163 | 1164 | err = client.CoreV1().Namespaces().Delete(context.TODO(), ns4.Name, metav1.DeleteOptions{}) 1165 | require.NoError(t, err) 1166 | 1167 | waitWithTimeout(wg, MaxWaitTime*10) 1168 | close(stop) 1169 | 1170 | nsfound, err := client.CoreV1().Namespaces().Get(context.TODO(), ns4.Name, metav1.GetOptions{}) 1171 | require.Condition(t, func() bool { return errors.IsNotFound(err) }, "Expected no namespace but got: %v; %v", nsfound, err) 1172 | 1173 | updTarget, err := secrets.Get(context.TODO(), target.Name, metav1.GetOptions{}) 1174 | require.NoError(t, err) 1175 | require.NotEqual(t, []byte("Hello Bar"), updTarget.Data["bar"]) 1176 | }) 1177 | 1178 | t.Run("deleting a secret deletes it in other namespaces", func(t *testing.T) { 1179 | source := corev1.Secret{ 1180 | ObjectMeta: metav1.ObjectMeta{ 1181 | Name: "source7", 1182 | Namespace: ns.Name, 1183 | Annotations: map[string]string{ 1184 | common.ReplicateTo: prefix + "test2", 1185 | }, 1186 | }, 1187 | Type: corev1.SecretTypeOpaque, 1188 | Data: 
map[string][]byte{ 1189 | "foo": []byte("Hello Foo"), 1190 | "bar": []byte("Hello Bar"), 1191 | }, 1192 | } 1193 | 1194 | wg, stop := waitForSecrets(client, 2, EventHandlerFuncs{ 1195 | AddFunc: func(wg *sync.WaitGroup, obj interface{}) { 1196 | secret := obj.(*corev1.Secret) 1197 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 1198 | log.Debugf("AddFunc %+v", obj) 1199 | wg.Done() 1200 | } else if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 1201 | log.Debugf("AddFunc %+v", obj) 1202 | wg.Done() 1203 | } 1204 | }, 1205 | }) 1206 | 1207 | _, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{}) 1208 | require.NoError(t, err) 1209 | 1210 | waitWithTimeout(wg, MaxWaitTime) 1211 | close(stop) 1212 | 1213 | secrets2 := client.CoreV1().Secrets(prefix + "test2") 1214 | _, err = secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 1215 | require.NoError(t, err) 1216 | 1217 | wg, stop = waitForSecrets(client, 2, EventHandlerFuncs{ 1218 | DeleteFunc: func(wg *sync.WaitGroup, obj interface{}) { 1219 | secret := obj.(*corev1.Secret) 1220 | if secret.Namespace == source.Namespace && secret.Name == source.Name { 1221 | log.Debugf("DeleteFunc %+v", obj) 1222 | wg.Done() 1223 | } else if secret.Namespace == prefix+"test2" && secret.Name == source.Name { 1224 | log.Debugf("DeleteFunc %+v", obj) 1225 | wg.Done() 1226 | } 1227 | }, 1228 | }) 1229 | 1230 | err = secrets.Delete(context.TODO(), source.Name, metav1.DeleteOptions{}) 1231 | require.NoError(t, err) 1232 | 1233 | waitWithTimeout(wg, MaxWaitTime) 1234 | close(stop) 1235 | 1236 | _, err = secrets.Get(context.TODO(), source.Name, metav1.GetOptions{}) 1237 | require.Condition(t, func() bool { return errors.IsNotFound(err) }, "Expected not found, but got a secret in namespace test: %+v", err) 1238 | 1239 | _, err = secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{}) 1240 | require.Condition(t, func() bool { return errors.IsNotFound(err) }, 
		"Expected not found, but got: %+v", err)
	})

	// The replicated copy must carry the source's Secret type (here
	// kubernetes.io/dockercfg), not just its Data payload.
	t.Run("replication properly replicates type", func(t *testing.T) {
		source := corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "source8",
				Namespace: ns.Name,
				Annotations: map[string]string{
					common.ReplicateTo: prefix + "test2",
				},
			},
			Type: corev1.SecretTypeDockercfg,
			Data: map[string][]byte{
				".dockerconfigjson": []byte("{}"),
				".dockercfg":        []byte("{}"),
			},
		}

		// Expect two Add events: the source itself and its replica in test2.
		wg, stop := waitForSecrets(client, 2, EventHandlerFuncs{
			AddFunc: func(wg *sync.WaitGroup, obj interface{}) {
				secret := obj.(*corev1.Secret)
				if secret.Namespace == source.Namespace && secret.Name == source.Name {
					log.Debugf("AddFunc %+v", obj)
					wg.Done()
				} else if secret.Namespace == prefix+"test2" && secret.Name == source.Name {
					log.Debugf("AddFunc %+v", obj)
					wg.Done()
				}
			},
		})

		_, err := secrets.Create(context.TODO(), &source, metav1.CreateOptions{})
		require.NoError(t, err)

		waitWithTimeout(wg, MaxWaitTime)
		close(stop)

		secrets2 := client.CoreV1().Secrets(prefix + "test2")
		updTarget, err := secrets2.Get(context.TODO(), source.Name, metav1.GetOptions{})
		require.NoError(t, err)
		require.Equal(t, []byte("{}"), updTarget.Data[".dockercfg"])
		require.Equal(t, corev1.SecretTypeDockercfg, updTarget.Type)

	})

}

// TestSecretReplicatorSyncByContent verifies that a "replicate-from" reference
// secret is forced back to the source's content after it has been manually
// overwritten out of band. It runs against a real cluster (see
// setupRealClientSet), with the replicator created via
// NewReplicator(client, 60s, false, true, filter) — presumably the `true`
// enables sync-by-content; TODO confirm against the NewReplicator signature.
func TestSecretReplicatorSyncByContent(t *testing.T) {

	log.SetLevel(log.TraceLevel)
	log.SetFormatter(&PlainFormatter{})

	prefix := namespacePrefix()
	client := setupRealClientSet(t)
	ctx := context.TODO()

	filter := common.NewNamespaceFilter([]string{})

	repl := NewReplicator(client, 60*time.Second, false, true, filter)
	go repl.Run()

	// Give the replicator's informers a moment to start before creating objects.
	time.Sleep(200 * time.Millisecond)

	ns := corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: prefix + "test"}}
	_, err := client.CoreV1().Namespaces().Create(ctx, &ns, metav1.CreateOptions{})
	require.NoError(t, err)

	// Best-effort cleanup; the deletion error is deliberately ignored.
	defer func() {
		_ = client.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})
	}()

	secrets := client.CoreV1().Secrets(prefix + "test")

	const MaxWaitTime = 1000 * time.Millisecond
	t.Run("enforce reference secret content equals source secret", func(t *testing.T) {
		// Source allows replication, restricted to its own namespace.
		source := corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "source",
				Namespace: ns.Name,
				Annotations: map[string]string{
					common.ReplicationAllowed:           "true",
					common.ReplicationAllowedNamespaces: ns.Name,
				},
			},
			Type: corev1.SecretTypeOpaque,
			Data: map[string][]byte{
				"foo": []byte("Hello World"),
			},
		}

		// Target pulls its content from the source via the replicate-from
		// annotation; it is created empty.
		target := corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "target",
				Namespace: ns.Name,
				Annotations: map[string]string{
					common.ReplicateFromAnnotation: common.MustGetKey(&source),
				},
			},
			Type: corev1.SecretTypeOpaque,
		}
		// Same object as target but with manually changed data — used below to
		// simulate an out-of-band overwrite that the replicator must revert.
		tmpOverwrite := corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "target",
				Namespace: ns.Name,
				Annotations: map[string]string{
					common.ReplicateFromAnnotation: common.MustGetKey(&source),
				},
			},
			Type: corev1.SecretTypeOpaque,
			Data: map[string][]byte{
				"foo": []byte("manually changed secret"),
			},
		}

		// NOTE(review): the count of 6 is the total number of add + update
		// events expected across both wait phases below — confirm against the
		// replicator's actual event sequence if this test turns flaky.
		wg, stop := waitForSecrets(client, 6, EventHandlerFuncs{
			AddFunc: func(wg *sync.WaitGroup, obj any) {
				secret := obj.(*corev1.Secret)
				if secret.Namespace == source.Namespace && secret.Name == source.Name {
					log.Debugf("AddFunc %+v", obj)
					wg.Done()
				} else if secret.Namespace == target.Namespace && secret.Name == target.Name {
					log.Debugf("AddFunc %+v", obj)
					wg.Done()
				}
			},
			UpdateFunc: func(wg *sync.WaitGroup, oldObj, newObj any) {
				secret := oldObj.(*corev1.Secret)
				if secret.Namespace == target.Namespace && secret.Name == target.Name {
					log.Debugf("UpdateFunc %+v -> %+v", oldObj, newObj)
					wg.Done()
				}
			},
		})

		_, err := secrets.Create(ctx, &source, metav1.CreateOptions{})
		require.NoError(t, err)

		_, err = secrets.Create(ctx, &target, metav1.CreateOptions{})
		require.NoError(t, err)

		waitWithTimeout(wg, MaxWaitTime)

		// The reference must have been filled with the source's content.
		updTarget, err := secrets.Get(ctx, target.Name, metav1.GetOptions{})
		require.NoError(t, err)
		require.Equal(t, []byte("Hello World"), updTarget.Data["foo"])

		// Overwrite the target by hand ...
		_, err = secrets.Update(ctx, &tmpOverwrite, metav1.UpdateOptions{})
		require.NoError(t, err)

		waitWithTimeout(wg, MaxWaitTime)

		// ... and verify the replicator restored the source's content.
		updTarget, err = secrets.Get(ctx, target.Name, metav1.GetOptions{})
		require.NoError(t, err)
		require.Equal(t, []byte("Hello World"), updTarget.Data["foo"])

		close(stop)
	})

}

// createInformerFunc selects which resource informer to build from a shared
// informer factory (e.g. Secrets or Namespaces).
type createInformerFunc func(factory informers.SharedInformerFactory) cache.SharedIndexInformer

// waitForObjects starts a short-lived informer (chosen by createInformerFunc)
// and returns a WaitGroup pre-loaded with count together with a stop channel
// that shuts the informer down. The supplied eventHandlers decide which
// events count toward completion — they are responsible for calling wg.Done().
func waitForObjects(client kubernetes.Interface, count int, eventHandlers EventHandlerFuncs, createInformerFunc createInformerFunc) (wg *sync.WaitGroup, stop chan struct{}) {
	wg = &sync.WaitGroup{}
	wg.Add(count)

	informerFactory := informers.NewSharedInformerFactory(client, 60*time.Second)
	informer := createInformerFunc(informerFactory)
	// Handler-registration result is intentionally discarded in this test helper.
	_, _ = informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj any) {
			if eventHandlers.AddFunc != nil {
				eventHandlers.AddFunc(wg, obj)
			}
		},
		UpdateFunc: func(oldObj, newObj any) {
1416 | if eventHandlers.UpdateFunc != nil { 1417 | eventHandlers.UpdateFunc(wg, oldObj, newObj) 1418 | } 1419 | 1420 | }, 1421 | DeleteFunc: func(obj any) { 1422 | if eventHandlers.DeleteFunc != nil { 1423 | eventHandlers.DeleteFunc(wg, obj) 1424 | } 1425 | }, 1426 | }) 1427 | 1428 | stop = make(chan struct{}) 1429 | go informerFactory.Start(stop) 1430 | 1431 | return 1432 | } 1433 | 1434 | func waitForNamespaces(client kubernetes.Interface, count int, eventHandlers EventHandlerFuncs) (wg *sync.WaitGroup, stop chan struct{}) { 1435 | createInformer := func(factory informers.SharedInformerFactory) cache.SharedIndexInformer { 1436 | return factory.Core().V1().Namespaces().Informer() 1437 | } 1438 | 1439 | return waitForObjects(client, count, eventHandlers, createInformer) 1440 | } 1441 | 1442 | func waitForSecrets(client kubernetes.Interface, count int, eventHandlers EventHandlerFuncs) (wg *sync.WaitGroup, stop chan struct{}) { 1443 | createInformer := func(factory informers.SharedInformerFactory) cache.SharedIndexInformer { 1444 | return factory.Core().V1().Secrets().Informer() 1445 | } 1446 | 1447 | return waitForObjects(client, count, eventHandlers, createInformer) 1448 | } 1449 | 1450 | func waitWithTimeout(wg *sync.WaitGroup, timeout time.Duration) { 1451 | done := make(chan struct{}) 1452 | go func() { 1453 | wg.Wait() 1454 | close(done) 1455 | }() 1456 | 1457 | select { 1458 | case <-done: 1459 | return 1460 | case <-time.After(timeout): 1461 | err := pkgerrors.Errorf("Timeout hit") 1462 | log.WithError(err).Debugf("Wait timed out") 1463 | } 1464 | } 1465 | 1466 | func homeDir() string { 1467 | if h := os.Getenv("HOME"); h != "" { 1468 | return h 1469 | } 1470 | return os.Getenv("USERPROFILE") // windows 1471 | } 1472 | --------------------------------------------------------------------------------