├── config ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── certmanager │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── certificate.yaml ├── webhook │ ├── kustomization.yaml │ ├── service.yaml │ ├── kustomizeconfig.yaml │ └── manifests.yaml ├── samples │ ├── kustomization.yaml │ ├── nacos.io_v1_servicediscovery.yaml │ └── nacos.io_v1_dynamicconfiguration.yaml ├── default │ ├── manager_config_patch.yaml │ ├── manager_webhook_patch.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── webhookcainjection_patch.yaml │ └── kustomization.yaml ├── crd │ ├── patches │ │ ├── cainjection_in_dynamicconfigurations.yaml │ │ ├── cainjection_in_servicediscoveries.yaml │ │ ├── webhook_in_dynamicconfigurations.yaml │ │ └── webhook_in_servicediscoveries.yaml │ ├── kustomizeconfig.yaml │ ├── kustomization.yaml │ └── bases │ │ └── nacos.io_dynamicconfigurations.yaml └── rbac │ ├── service_account.yaml │ ├── auth_proxy_client_clusterrole.yaml │ ├── role_binding.yaml │ ├── auth_proxy_role_binding.yaml │ ├── leader_election_role_binding.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── dynamicconfiguration_viewer_role.yaml │ ├── servicediscovery_viewer_role.yaml │ ├── servicediscovery_editor_role.yaml │ ├── dynamicconfiguration_editor_role.yaml │ ├── leader_election_role.yaml │ └── role.yaml ├── .dockerignore ├── pkg ├── constant.go ├── nacos │ ├── nacos_types.go │ ├── naming │ │ ├── naming_types.go │ │ ├── naming_constant.go │ │ ├── nacos_naming_controller.go │ │ ├── convert_util.go │ │ └── naming_client.go │ ├── client │ │ ├── config_client.go │ │ └── impl │ │ │ └── default_client.go │ ├── auth │ │ └── auth_provider.go │ ├── server2cluster.go │ ├── util.go │ └── dynamicconfiguration_util.go ├── controller │ ├── dynamicconfiguration_controller_test.go │ ├── secret_controller_test.go │ ├── configmap_controller_test.go │ ├── service_controller.go │ ├── suite_test.go │ ├── 
secret_controller.go │ ├── dynamicconfiguration_controller.go │ ├── configmap_controller.go │ ├── endpoint_controller.go │ └── servicediscovery_controller.go └── utils.go ├── .gitignore ├── charts └── nacos-controller │ ├── templates │ ├── serviceaccount.yaml │ ├── service.yaml │ ├── clusterrole.yaml │ ├── _helpers.tpl │ ├── deployment.yaml │ └── admissionregistration.yaml │ ├── .helmignore │ ├── Chart.yaml │ └── values.yaml ├── hack └── boilerplate.go.txt ├── configQuickStart.sh ├── Dockerfile ├── api └── v1 │ ├── groupversion_info.go │ ├── servicediscovery_types.go │ ├── webhook_suite_test.go │ ├── dynamicconfiguration_types.go │ └── dynamicconfiguration_webhook.go ├── PROJECT ├── .github └── workflows │ └── docker-publish.yml ├── go.mod ├── cmd └── main.go ├── README_CN.md ├── Makefile └── README.md /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # More 
info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 3 | bin/ 4 | -------------------------------------------------------------------------------- /config/samples/kustomization.yaml: -------------------------------------------------------------------------------- 1 | ## Append samples of your project ## 2 | resources: 3 | - nacos.io_v1_dynamicconfiguration.yaml 4 | - nacos.io_v1_servicediscovery.yaml 5 | #+kubebuilder:scaffold:manifestskustomizesamples 6 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | -------------------------------------------------------------------------------- /pkg/constant.go: -------------------------------------------------------------------------------- 1 | package pkg 2 | 3 | const ( 4 | ConfigMapLabel string = "nacos.io/owned-by-dc" 5 | ) 6 | 7 | const ( 8 | FinalizerName string = "nacos.io/dc-finalizer" 9 | ) 10 | 11 | const ( 12 | PhaseSucceed string = "succeed" 13 | PhaseFailed string = "failed" 14 | ) 15 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | -------------------------------------------------------------------------------- /pkg/nacos/nacos_types.go: 
-------------------------------------------------------------------------------- 1 | package nacos 2 | 3 | import ( 4 | nacoscontrollerv1 "github.com/nacos-group/nacos-controller/api/v1" 5 | v1 "k8s.io/api/core/v1" 6 | ) 7 | 8 | type NacosServer interface { 9 | GetServerConf() (nacoscontrollerv1.NacosServerConfiguration, error) 10 | GetAuthRef() (v1.ObjectReference, error) 11 | } 12 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_dynamicconfigurations.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME 7 | name: dynamicconfigurations.nacos.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_servicediscoveries.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME 7 | name: servicediscoveries.nacos.io.nacos.io 8 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: serviceaccount 6 | app.kubernetes.io/instance: controller-manager-sa 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: nacos-controller 9 | app.kubernetes.io/part-of: nacos-controller 10 | app.kubernetes.io/managed-by: 
kustomize 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/nacos.io_v1_servicediscovery.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: nacos.io.nacos.io/v1 2 | kind: ServiceDiscovery 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: servicediscovery 6 | app.kubernetes.io/instance: servicediscovery-sample 7 | app.kubernetes.io/part-of: nacos-controller 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: nacos-controller 10 | name: servicediscovery-sample 11 | spec: 12 | # TODO(user): Add fields here -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | 10 | # Test binary, build with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Kubernetes Generated files - skip generated files, except for vendored files 17 | 18 | !vendor/**/zz_generated.* 19 | 20 | # editor and IDE paraphernalia 21 | .idea 22 | *.swp 23 | *.swo 24 | *~ 25 | vendor -------------------------------------------------------------------------------- /charts/nacos-controller/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.serviceAccount.create -}} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ include "nacos-controller.serviceAccountName" . }} 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | {{- include "nacos-controller.labels" . | nindent 4 }} 9 | {{- with .Values.serviceAccount.annotations }} 10 | annotations: 11 | {{- toYaml . 
| nindent 4 }} 12 | {{- end }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /charts/nacos-controller/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /pkg/nacos/naming/naming_types.go: -------------------------------------------------------------------------------- 1 | package naming 2 | 3 | type Address struct { 4 | IP string `json:"ip"` 5 | Port uint64 `json:"port"` 6 | } 7 | 8 | type NacosOptions struct { 9 | Namespace string 10 | 11 | // ServersIP are explicitly specified to be connected to nacos by client. 12 | ServersIP []string 13 | 14 | // ServerPort are explicitly specified to be used when the client connects to nacos. 
15 | ServerPort uint64 16 | 17 | AccessKey string 18 | SecretKey string 19 | } 20 | -------------------------------------------------------------------------------- /config/samples/nacos.io_v1_dynamicconfiguration.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: nacos.io/v1 2 | kind: DynamicConfiguration 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: dynamicconfiguration 6 | app.kubernetes.io/instance: dynamicconfiguration-sample 7 | app.kubernetes.io/part-of: nacos-controller 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: nacos-controller 10 | name: dynamicconfiguration-sample 11 | spec: 12 | # TODO(user): Add fields here 13 | -------------------------------------------------------------------------------- /pkg/nacos/naming/naming_constant.go: -------------------------------------------------------------------------------- 1 | package naming 2 | 3 | import "time" 4 | 5 | const ( 6 | DefaultTaskDelay = 1 * time.Second 7 | 8 | DefaultResyncInterval = 0 9 | 10 | DefaultNacosEndpointWeight = 100 11 | 12 | MaxRetry = 3 13 | // 14 | //ToNacos Direction = "to-nacos" 15 | // 16 | //ToK8s Direction = "to-k8s" 17 | // 18 | //Both Direction = "both" 19 | 20 | NamingSyncedMark = "synced_by_nacos_controller" 21 | 22 | NamingDefaultGroupName string = "DEFAULT_GROUP" 23 | ) 24 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_dynamicconfigurations.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: dynamicconfigurations.nacos.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - 
v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_servicediscoveries.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: servicediscoveries.nacos.io.nacos.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /charts/nacos-controller/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "nacos-controller.fullname" . }} 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | {{- include "nacos-controller.labels" . | nindent 4 }} 8 | spec: 9 | type: {{ .Values.service.type }} 10 | ports: 11 | - port: {{ .Values.service.port }} 12 | targetPort: webhook 13 | protocol: TCP 14 | name: http 15 | selector: 16 | {{- include "nacos-controller.selectorLabels" . 
// Contains reports whether s is present in list.
func Contains(list []string, s string) bool {
	for _, v := range list {
		if v == s {
			return true
		}
	}
	return false
}

// Remove returns a slice equal to list with every occurrence of s removed.
//
// The previous implementation deleted elements in place with
// append(list[:i], list[i+1:]...) while ranging over the original slice
// header; after a removal the loop kept iterating the stale header, so the
// element shifted into position i was skipped (adjacent duplicates survived,
// e.g. Remove([a a b], "a") still contained an "a") and the tail re-read
// stale backing-array data. Building a fresh result slice removes all
// occurrences and stops aliasing the caller's backing array.
func Remove(list []string, s string) []string {
	result := make([]string, 0, len(list))
	for _, v := range list {
		if v != s {
			result = append(result, v)
		}
	}
	return result
}

// CurrentContext holds a package-level context identifier; "null" is the
// uninitialized sentinel. NOTE(review): its exact semantics are set by
// callers elsewhere in the project — confirm before relying on it.
var CurrentContext = "null"
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: cert 18 | readOnly: true 19 | volumes: 20 | - name: cert 21 | secret: 22 | defaultMode: 420 23 | secretName: webhook-server-cert 24 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: manager-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: nacos-controller 9 | app.kubernetes.io/part-of: nacos-controller 10 | app.kubernetes.io/managed-by: kustomize 11 | name: manager-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: manager-role 16 | subjects: 17 | - 
kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: proxy-rolebinding 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: nacos-controller 9 | app.kubernetes.io/part-of: nacos-controller 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: proxy-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rolebinding 6 | app.kubernetes.io/instance: leader-election-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: nacos-controller 9 | app.kubernetes.io/part-of: nacos-controller 10 | app.kubernetes.io/managed-by: kustomize 11 | name: leader-election-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: leader-election-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | 
metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: proxy-role 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: nacos-controller 9 | app.kubernetes.io/part-of: nacos-controller 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-role 12 | rules: 13 | - apiGroups: 14 | - authentication.k8s.io 15 | resources: 16 | - tokenreviews 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - authorization.k8s.io 21 | resources: 22 | - subjectaccessreviews 23 | verbs: 24 | - create 25 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: service 7 | app.kubernetes.io/instance: controller-manager-metrics-service 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: nacos-controller 10 | app.kubernetes.io/part-of: nacos-controller 11 | app.kubernetes.io/managed-by: kustomize 12 | name: controller-manager-metrics-service 13 | namespace: system 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | protocol: TCP 19 | targetPort: https 20 | selector: 21 | control-plane: controller-manager 22 | -------------------------------------------------------------------------------- /configQuickStart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 检查是否提供了 nacos-server-addr 参数 4 | if [ -z "$1" ]; then 5 | echo "Usage: $0 [nacos-namespace-id]" 6 | exit 1 7 | fi 8 | 9 | NACOS_SERVER_ADDR=$1 10 | NACOS_NAMESPACE_ID=${2:-""} # 如果第二个参数为空,则设置为默认值空字符串 11 | 12 | # 直接生成 YAML 内容 13 | GENERATED_CONTENT="apiVersion: nacos.io/v1 14 | kind: DynamicConfiguration 15 | metadata: 16 | name: dc-quickstart 17 | spec: 18 | nacosServer: 19 | serverAddr: 
$NACOS_SERVER_ADDR 20 | namespace: ${NACOS_NAMESPACE_ID:-\"\"} 21 | strategy: 22 | scope: full 23 | syncDeletion: true 24 | conflictPolicy: preferCluster" 25 | 26 | # 使用 kubectl 命令直接部署 27 | echo "$GENERATED_CONTENT" | kubectl apply -f - 28 | 29 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/dynamicconfiguration_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view dynamicconfigurations. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: dynamicconfiguration-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: nacos-controller 10 | app.kubernetes.io/part-of: nacos-controller 11 | app.kubernetes.io/managed-by: kustomize 12 | name: dynamicconfiguration-viewer-role 13 | rules: 14 | - apiGroups: 15 | - nacos.io 16 | resources: 17 | - dynamicconfigurations 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - nacos.io 24 | resources: 25 | - dynamicconfigurations/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/servicediscovery_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view servicediscoveries. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: servicediscovery-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: nacos-controller 10 | app.kubernetes.io/part-of: nacos-controller 11 | app.kubernetes.io/managed-by: kustomize 12 | name: servicediscovery-viewer-role 13 | rules: 14 | - apiGroups: 15 | - nacos.io.nacos.io 16 | resources: 17 | - servicediscoveries 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - nacos.io.nacos.io 24 | resources: 25 | - servicediscoveries/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting nameReference. 2 | # It requires kustomize v2.1.0 or newer to work properly. 
3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | -------------------------------------------------------------------------------- /config/rbac/servicediscovery_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit servicediscoveries. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: servicediscovery-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: nacos-controller 10 | app.kubernetes.io/part-of: nacos-controller 11 | app.kubernetes.io/managed-by: kustomize 12 | name: servicediscovery-editor-role 13 | rules: 14 | - apiGroups: 15 | - nacos.io.nacos.io 16 | resources: 17 | - servicediscoveries 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - nacos.io.nacos.io 28 | resources: 29 | - servicediscoveries/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/dynamicconfiguration_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit dynamicconfigurations. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: dynamicconfiguration-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: nacos-controller 10 | app.kubernetes.io/part-of: nacos-controller 11 | app.kubernetes.io/managed-by: kustomize 12 | name: dynamicconfiguration-editor-role 13 | rules: 14 | - apiGroups: 15 | - nacos.io 16 | resources: 17 | - dynamicconfigurations 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - nacos.io 28 | resources: 29 | - dynamicconfigurations/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | app.kubernetes.io/name: servicemonitor 9 | app.kubernetes.io/instance: controller-manager-metrics-monitor 10 | app.kubernetes.io/component: metrics 11 | app.kubernetes.io/created-by: nacos-controller 12 | app.kubernetes.io/part-of: nacos-controller 13 | app.kubernetes.io/managed-by: kustomize 14 | name: controller-manager-metrics-monitor 15 | namespace: system 16 | spec: 17 | endpoints: 18 | - path: /metrics 19 | port: https 20 | scheme: https 21 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | selector: 25 | matchLabels: 26 | control-plane: controller-manager 27 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: role 7 | app.kubernetes.io/instance: leader-election-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: nacos-controller 10 | app.kubernetes.io/part-of: nacos-controller 11 | app.kubernetes.io/managed-by: kustomize 12 | name: leader-election-role 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - configmaps 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - create 23 | - update 24 | - patch 25 | - delete 26 | - apiGroups: 27 | - coordination.k8s.io 28 | resources: 29 | - leases 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - create 35 | - update 36 | - patch 37 | - delete 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - events 42 | verbs: 43 | - create 44 | - patch 45 | -------------------------------------------------------------------------------- /pkg/controller/secret_controller_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controller 18 | 19 | import ( 20 | . 
"github.com/onsi/ginkgo/v2" 21 | ) 22 | 23 | var _ = Describe("Secret Controller", func() { 24 | Context("When reconciling a resource", func() { 25 | 26 | It("should successfully reconcile the resource", func() { 27 | 28 | // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. 29 | // Example: If you expect a certain status condition after reconciliation, verify it here. 30 | }) 31 | }) 32 | }) 33 | -------------------------------------------------------------------------------- /pkg/controller/configmap_controller_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controller 18 | 19 | import ( 20 | . "github.com/onsi/ginkgo/v2" 21 | ) 22 | 23 | var _ = Describe("ConfigMap Controller", func() { 24 | Context("When reconciling a resource", func() { 25 | 26 | It("should successfully reconcile the resource", func() { 27 | 28 | // TODO(user): Add more specific assertions depending on your controller's reconciliation logic. 29 | // Example: If you expect a certain status condition after reconciliation, verify it here. 
30 | }) 31 | }) 32 | }) 33 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.19 as builder 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | RUN mkdir -p /workspace/src/github.com/nacos-group/nacos-controller/ 7 | ENV GOPATH /workspace 8 | WORKDIR /workspace/src/github.com/nacos-group/nacos-controller/ 9 | # Copy the go source 10 | COPY . . 11 | 12 | # Build 13 | # the GOARCH has not a default value to allow the binary be built according to the host where the command 14 | # was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO 15 | # the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, 16 | # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. 17 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} GO111MODULE=on go build -a -o /workspace/manager cmd/main.go 18 | 19 | # Use distroless as minimal base image to package the manager binary 20 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 21 | FROM alpine:3.10.2 22 | WORKDIR / 23 | COPY --from=builder /workspace/manager . 
24 | 25 | ENTRYPOINT ["/manager"] 26 | -------------------------------------------------------------------------------- /pkg/nacos/naming/nacos_naming_controller.go: -------------------------------------------------------------------------------- 1 | package naming 2 | 3 | import ( 4 | "context" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | "k8s.io/client-go/kubernetes" 8 | "sigs.k8s.io/controller-runtime/pkg/client" 9 | ) 10 | 11 | type SyncServiceController struct { 12 | K8sClientSet *kubernetes.Clientset 13 | NamingClient *NacosNamingClient 14 | K8sClient client.Client 15 | } 16 | 17 | type SyncServiceOptions struct { 18 | NamingClient *NacosNamingClient 19 | } 20 | 21 | func NewSyncServiceController(c client.Client, cs *kubernetes.Clientset, opt SyncServiceOptions) *SyncServiceController { 22 | if nil == opt.NamingClient { 23 | nc, err := NewNamingClient(NacosOptions{}) 24 | if err != nil { 25 | return nil 26 | } 27 | opt.NamingClient = nc 28 | } 29 | return &SyncServiceController{ 30 | K8sClientSet: cs, 31 | NamingClient: opt.NamingClient, 32 | K8sClient: c, 33 | } 34 | } 35 | 36 | func (scc *SyncServiceController) SyncService(ctx context.Context, obj client.Object) error { 37 | service := &v1.Service{} 38 | err := scc.K8sClient.Get(ctx, client.ObjectKeyFromObject(obj), service) 39 | if err != nil { 40 | return err 41 | } 42 | 43 | return nil 44 | } 45 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 
3 | # It should be run by config/default 4 | resources: 5 | - bases/nacos.io_dynamicconfigurations.yaml 6 | - bases/nacos.io_servicediscoveries.yaml 7 | #+kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patches: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- path: patches/webhook_in_dynamicconfigurations.yaml 13 | #- patches/webhook_in_servicediscoveries.yaml 14 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 15 | 16 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 17 | # patches here are for enabling the CA injection for each CRD 18 | #- path: patches/cainjection_in_dynamicconfigurations.yaml 19 | #- patches/cainjection_in_servicediscoveries.yaml 20 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 21 | 22 | # the following config is for teaching kustomize how to do kustomization for CRDs. 23 | configurations: 24 | - kustomizeconfig.yaml 25 | -------------------------------------------------------------------------------- /charts/nacos-controller/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: nacos-controller 3 | description: A Helm chart for Nacos controller 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. 
This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 1.0.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "1.0.0" 25 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | securityContext: 14 | allowPrivilegeEscalation: false 15 | capabilities: 16 | drop: 17 | - "ALL" 18 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.14.1 19 | args: 20 | - "--secure-listen-address=0.0.0.0:8443" 21 | - "--upstream=http://127.0.0.1:8080/" 22 | - "--logtostderr=true" 23 | - "--v=0" 24 | ports: 25 | - containerPort: 8443 26 | protocol: TCP 27 | name: https 28 | resources: 29 | limits: 30 | cpu: 500m 31 | memory: 128Mi 32 | requests: 33 | cpu: 5m 34 | memory: 64Mi 35 | - name: manager 36 | args: 37 | - "--health-probe-bind-address=:8081" 38 | - "--metrics-bind-address=127.0.0.1:8080" 39 | - "--leader-elect" 40 | -------------------------------------------------------------------------------- /config/webhook/manifests.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: admissionregistration.k8s.io/v1 3 | kind: MutatingWebhookConfiguration 4 | metadata: 5 | name: mutating-webhook-configuration 6 | webhooks: 7 | - admissionReviewVersions: 8 | - v1 9 | clientConfig: 10 | service: 11 | name: webhook-service 12 | namespace: system 13 | path: /mutate-nacos-io-v1-dynamicconfiguration 14 | failurePolicy: Fail 15 | name: mdynamicconfiguration.kb.io 16 | rules: 17 | - apiGroups: 18 | - nacos.io 19 | apiVersions: 20 | - v1 21 | operations: 22 | - CREATE 23 | - UPDATE 24 | resources: 25 | - dynamicconfigurations 26 | sideEffects: None 27 | --- 28 | apiVersion: admissionregistration.k8s.io/v1 29 | kind: ValidatingWebhookConfiguration 30 | metadata: 31 | name: validating-webhook-configuration 32 | webhooks: 33 | - admissionReviewVersions: 34 | - v1 35 | clientConfig: 36 | service: 37 | name: webhook-service 38 | namespace: system 39 | path: /validate-nacos-io-v1-dynamicconfiguration 40 | failurePolicy: Fail 41 | name: vdynamicconfiguration.kb.io 42 | rules: 43 | - apiGroups: 44 | - nacos.io 45 | apiVersions: 46 | - v1 47 | operations: 48 | - CREATE 49 | - UPDATE 50 | resources: 51 | - dynamicconfigurations 52 | sideEffects: None 53 | -------------------------------------------------------------------------------- /api/v1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Package v1 contains API Schema definitions for the nacos.io v1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=nacos.io 20 | package v1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "nacos.io", Version: "v1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch add annotation to admission webhook config and 2 | # CERTIFICATE_NAMESPACE and CERTIFICATE_NAME will be substituted by kustomize 3 | apiVersion: admissionregistration.k8s.io/v1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | labels: 7 | app.kubernetes.io/name: mutatingwebhookconfiguration 8 | app.kubernetes.io/instance: mutating-webhook-configuration 9 | app.kubernetes.io/component: webhook 10 | app.kubernetes.io/created-by: nacos-controller 11 | app.kubernetes.io/part-of: nacos-controller 12 | app.kubernetes.io/managed-by: kustomize 13 | name: mutating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME 16 | --- 17 | apiVersion: admissionregistration.k8s.io/v1 18 | kind: ValidatingWebhookConfiguration 19 | metadata: 20 | labels: 21 | app.kubernetes.io/name: validatingwebhookconfiguration 22 | app.kubernetes.io/instance: 
validating-webhook-configuration 23 | app.kubernetes.io/component: webhook 24 | app.kubernetes.io/created-by: nacos-controller 25 | app.kubernetes.io/part-of: nacos-controller 26 | app.kubernetes.io/managed-by: kustomize 27 | name: validating-webhook-configuration 28 | annotations: 29 | cert-manager.io/inject-ca-from: CERTIFICATE_NAMESPACE/CERTIFICATE_NAME 30 | -------------------------------------------------------------------------------- /charts/nacos-controller/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ include "nacos-controller.fullname" . }} 6 | rules: 7 | - apiGroups: 8 | - nacos.io 9 | resources: 10 | - '*' 11 | verbs: 12 | - '*' 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - "configmaps" 17 | - "secrets" 18 | - "services" 19 | - "endpoints" 20 | verbs: 21 | - create 22 | - delete 23 | - get 24 | - list 25 | - patch 26 | - update 27 | - watch 28 | - apiGroups: 29 | - "coordination.k8s.io" 30 | resources: 31 | - "leases" 32 | verbs: 33 | - create 34 | - delete 35 | - get 36 | - list 37 | - patch 38 | - update 39 | - watch 40 | - apiGroups: 41 | - "" 42 | resources: 43 | - events 44 | verbs: 45 | - create 46 | - patch 47 | --- 48 | apiVersion: rbac.authorization.k8s.io/v1 49 | kind: ClusterRoleBinding 50 | metadata: 51 | name: {{ include "nacos-controller.fullname" . }} 52 | roleRef: 53 | apiGroup: rbac.authorization.k8s.io 54 | kind: ClusterRole 55 | name: {{ include "nacos-controller.fullname" . }} 56 | subjects: 57 | - kind: ServiceAccount 58 | name: {{ include "nacos-controller.fullname" . }} 59 | namespace: {{ .Release.Namespace }} -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | # Code generated by tool. DO NOT EDIT. 
2 | # This file is used to track the info used to scaffold your project 3 | # and allow the plugins properly work. 4 | # More info: https://book.kubebuilder.io/reference/project-config.html 5 | domain: nacos.io 6 | layout: 7 | - go.kubebuilder.io/v4 8 | projectName: nacos-controller 9 | repo: nacos-controller 10 | resources: 11 | - api: 12 | crdVersion: v1 13 | namespaced: true 14 | controller: true 15 | domain: nacos.io 16 | group: nacos.io 17 | kind: DynamicConfiguration 18 | path: nacos-controller/api/v1 19 | version: v1 20 | - domain: nacos.io 21 | kind: DynamicConfiguration 22 | path: nacos-controller/api/v1 23 | version: v1 24 | webhooks: 25 | defaulting: true 26 | validation: true 27 | webhookVersion: v1 28 | - controller: true 29 | domain: nacos.io 30 | kind: ConfigMap 31 | version: v1 32 | - controller: true 33 | core: true 34 | group: core 35 | kind: Secret 36 | path: k8s.io/api/core/v1 37 | version: v1 38 | - api: 39 | crdVersion: v1 40 | namespaced: true 41 | controller: true 42 | domain: nacos.io 43 | group: nacos.io 44 | kind: ServiceDiscovery 45 | path: nacos-controller/api/v1 46 | version: v1 47 | - controller: true 48 | group: core 49 | kind: Endpoint 50 | path: k8s.io/api/core/v1 51 | version: v1 52 | - controller: true 53 | group: core 54 | kind: Service 55 | path: k8s.io/api/core/v1 56 | version: v1 57 | version: "3" 58 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 2 | # More document can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes. 
4 | apiVersion: cert-manager.io/v1 5 | kind: Issuer 6 | metadata: 7 | labels: 8 | app.kubernetes.io/name: certificate 9 | app.kubernetes.io/instance: serving-cert 10 | app.kubernetes.io/component: certificate 11 | app.kubernetes.io/created-by: nacos-controller 12 | app.kubernetes.io/part-of: nacos-controller 13 | app.kubernetes.io/managed-by: kustomize 14 | name: selfsigned-issuer 15 | namespace: system 16 | spec: 17 | selfSigned: {} 18 | --- 19 | apiVersion: cert-manager.io/v1 20 | kind: Certificate 21 | metadata: 22 | labels: 23 | app.kubernetes.io/name: certificate 24 | app.kubernetes.io/instance: serving-cert 25 | app.kubernetes.io/component: certificate 26 | app.kubernetes.io/created-by: nacos-controller 27 | app.kubernetes.io/part-of: nacos-controller 28 | app.kubernetes.io/managed-by: kustomize 29 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 30 | namespace: system 31 | spec: 32 | # SERVICE_NAME and SERVICE_NAMESPACE will be substituted by kustomize 33 | dnsNames: 34 | - SERVICE_NAME.SERVICE_NAMESPACE.svc 35 | - SERVICE_NAME.SERVICE_NAMESPACE.svc.cluster.local 36 | issuerRef: 37 | kind: Issuer 38 | name: selfsigned-issuer 39 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 40 | -------------------------------------------------------------------------------- /charts/nacos-controller/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for nacos-controller. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | replicaCount: 1 6 | 7 | image: 8 | repository: nacos/nacos-controller 9 | pullPolicy: IfNotPresent 10 | # Overrides the image tag whose default is the chart appVersion. 
11 | tag: "main" 12 | 13 | imagePullSecrets: [] 14 | 15 | serviceAccount: 16 | # Specifies whether a service account should be created 17 | create: true 18 | # Annotations to add to the service account 19 | annotations: {} 20 | # The name of the service account to use. 21 | # If not set and create is true, a name is generated using the fullname template 22 | name: "" 23 | 24 | podAnnotations: {} 25 | 26 | podSecurityContext: {} 27 | # fsGroup: 2000 28 | 29 | securityContext: {} 30 | # capabilities: 31 | # drop: 32 | # - ALL 33 | # readOnlyRootFilesystem: true 34 | # runAsNonRoot: true 35 | # runAsUser: 1000 36 | 37 | service: 38 | type: ClusterIP 39 | port: 443 40 | 41 | 42 | resources: {} 43 | # We usually recommend not to specify default resources and to leave this as a conscious 44 | # choice for the user. This also increases chances charts run on environments with little 45 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 46 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
47 | # limits: 48 | # cpu: 100m 49 | # memory: 128Mi 50 | # requests: 51 | # cpu: 100m 52 | # memory: 128Mi 53 | 54 | 55 | nodeSelector: {} 56 | 57 | tolerations: [] 58 | 59 | affinity: {} 60 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - endpoints 11 | - secrets 12 | - services 13 | verbs: 14 | - create 15 | - delete 16 | - get 17 | - list 18 | - patch 19 | - update 20 | - watch 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - endpoints/finalizers 25 | - secrets/finalizers 26 | - services/finalizers 27 | verbs: 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - endpoints/status 33 | - secrets/status 34 | - services/status 35 | verbs: 36 | - get 37 | - patch 38 | - update 39 | - apiGroups: 40 | - nacos.io 41 | resources: 42 | - configmaps 43 | - dynamicconfigurations 44 | verbs: 45 | - create 46 | - delete 47 | - get 48 | - list 49 | - patch 50 | - update 51 | - watch 52 | - apiGroups: 53 | - nacos.io 54 | resources: 55 | - configmaps/finalizers 56 | - dynamicconfigurations/finalizers 57 | verbs: 58 | - update 59 | - apiGroups: 60 | - nacos.io 61 | resources: 62 | - configmaps/status 63 | - dynamicconfigurations/status 64 | verbs: 65 | - get 66 | - patch 67 | - update 68 | - apiGroups: 69 | - nacos.io.nacos.io 70 | resources: 71 | - servicediscoveries 72 | verbs: 73 | - create 74 | - delete 75 | - get 76 | - list 77 | - patch 78 | - update 79 | - watch 80 | - apiGroups: 81 | - nacos.io.nacos.io 82 | resources: 83 | - servicediscoveries/finalizers 84 | verbs: 85 | - update 86 | - apiGroups: 87 | - nacos.io.nacos.io 88 | resources: 89 | - servicediscoveries/status 90 | verbs: 91 | - get 92 | - patch 93 | - update 94 | 
-------------------------------------------------------------------------------- /pkg/nacos/client/config_client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "sync" 5 | 6 | "github.com/nacos-group/nacos-sdk-go/v2/model" 7 | v1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/types" 9 | ) 10 | 11 | var _defaultClient NacosConfigClient 12 | var _lock = sync.Mutex{} 13 | 14 | func RegisterNacosClientIfAbsent(c NacosConfigClient) { 15 | _lock.Lock() 16 | defer _lock.Unlock() 17 | if _defaultClient == nil { 18 | _defaultClient = c 19 | } 20 | } 21 | 22 | func RegisterNacosClient(c NacosConfigClient) { 23 | _lock.Lock() 24 | defer _lock.Unlock() 25 | _defaultClient = c 26 | } 27 | 28 | func GetDefaultNacosClient() NacosConfigClient { 29 | if _defaultClient == nil { 30 | panic("No default NacosConfigClient registered") 31 | } 32 | return _defaultClient 33 | } 34 | 35 | type NacosConfigClient interface { 36 | GetConfig(param NacosConfigParam) (string, error) 37 | PublishConfig(param NacosConfigParam) (bool, error) 38 | DeleteConfig(param NacosConfigParam) (bool, error) 39 | ListenConfig(param NacosConfigParam) error 40 | CancelListenConfig(param NacosConfigParam) error 41 | CloseClient(param NacosConfigParam) 42 | SearchConfigs(param SearchConfigParam) (*model.ConfigPage, error) 43 | } 44 | 45 | type NacosConfigParam struct { 46 | Key types.NamespacedName 47 | AuthRef *v1.ObjectReference 48 | NacosServerParam NacosServerParam 49 | DataId string 50 | Group string 51 | Content string 52 | OnChange func(namespace, group, dataId, data string) 53 | } 54 | 55 | type NacosServerParam struct { 56 | Endpoint string 57 | ServerAddr string 58 | Namespace string 59 | } 60 | 61 | type SearchConfigParam struct { 62 | Key types.NamespacedName 63 | AuthRef *v1.ObjectReference 64 | NacosServerParam NacosServerParam 65 | DataId string 66 | Group string 67 | PageNo int 68 | PageSize int 69 | } 70 | 
-------------------------------------------------------------------------------- /charts/nacos-controller/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "nacos-controller.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "nacos-controller.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "nacos-controller.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "nacos-controller.labels" -}} 37 | helm.sh/chart: {{ include "nacos-controller.chart" . }} 38 | {{ include "nacos-controller.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "nacos-controller.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "nacos-controller.name" . 
}} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | 53 | {{/* 54 | Create the name of the service account to use 55 | */}} 56 | {{- define "nacos-controller.serviceAccountName" -}} 57 | {{- if .Values.serviceAccount.create }} 58 | {{- default (include "nacos-controller.fullname" .) .Values.serviceAccount.name }} 59 | {{- else }} 60 | {{- default "default" .Values.serviceAccount.name }} 61 | {{- end }} 62 | {{- end }} 63 | -------------------------------------------------------------------------------- /pkg/controller/service_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
*/

package controller

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// ServiceReconciler reconciles a Service object
type ServiceReconciler struct {
	client.Client
	Scheme *runtime.Scheme
}

//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=services/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=core,resources=services/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the Service object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile
//
// NOTE(review): currently a generated no-op stub — it discards the logger
// and always returns an empty Result with no error, so every Service event
// is acknowledged without action.
func (r *ServiceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	_ = log.FromContext(ctx)

	// TODO(user): your logic here

	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
// It watches core/v1 Services and registers this reconciler under the
// explicit controller name "ServiceController".
func (r *ServiceReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.Service{}).
		Named("ServiceController").
		Complete(r)
}
}} 34 | containers: 35 | - name: {{ .Chart.Name }} 36 | securityContext: 37 | {{- toYaml .Values.securityContext | nindent 12 }} 38 | image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" 39 | imagePullPolicy: {{ .Values.image.pullPolicy }} 40 | command: 41 | - /manager 42 | args: 43 | - --leader-elect 44 | - --enable-webhook 45 | ports: 46 | - name: webhook 47 | containerPort: 9443 48 | protocol: TCP 49 | livenessProbe: 50 | httpGet: 51 | path: /healthz 52 | port: 8081 53 | initialDelaySeconds: 15 54 | periodSeconds: 20 55 | readinessProbe: 56 | httpGet: 57 | path: /readyz 58 | port: 8081 59 | initialDelaySeconds: 5 60 | periodSeconds: 10 61 | volumeMounts: 62 | - mountPath: /tmp/k8s-webhook-server/serving-certs/ 63 | name: certs 64 | readOnly: true 65 | resources: 66 | {{- toYaml .Values.resources | nindent 12 }} 67 | {{- with .Values.nodeSelector }} 68 | nodeSelector: 69 | {{- toYaml . | nindent 8 }} 70 | {{- end }} 71 | {{- with .Values.affinity }} 72 | affinity: 73 | {{- toYaml . | nindent 8 }} 74 | {{- end }} 75 | {{- with .Values.tolerations }} 76 | tolerations: 77 | {{- toYaml . | nindent 8 }} 78 | {{- end }} 79 | -------------------------------------------------------------------------------- /charts/nacos-controller/templates/admissionregistration.yaml: -------------------------------------------------------------------------------- 1 | {{- $ca := genCA "admission-controller-ca" 3650 }} 2 | {{- $cn := printf "%s-%s" .Release.Name .Chart.Name }} 3 | {{- $altName1 := printf "%s.%s" (default (include "nacos-controller.fullname" .) .Release.Name) .Release.Namespace }} 4 | {{- $altName2 := printf "%s.%s.svc" (default (include "nacos-controller.fullname" .) 
.Release.Name) .Release.Namespace }} 5 | {{- $cert := genSignedCert $cn nil (list $altName1 $altName2) 3650 $ca }} 6 | --- 7 | apiVersion: admissionregistration.k8s.io/v1 8 | kind: ValidatingWebhookConfiguration 9 | metadata: 10 | name: {{ include "nacos-controller.fullname" . }} 11 | webhooks: 12 | - admissionReviewVersions: 13 | - v1 14 | - v1beta1 15 | clientConfig: 16 | caBundle: {{ b64enc $ca.Cert }} 17 | service: 18 | name: {{ include "nacos-controller.fullname" . }} 19 | namespace: {{ .Release.Namespace }} 20 | path: /validate-nacos-io-v1-dynamicconfiguration 21 | port: 443 22 | failurePolicy: Fail 23 | matchPolicy: Equivalent 24 | name: dc.validating.nacos.io 25 | namespaceSelector: {} 26 | objectSelector: {} 27 | rules: 28 | - apiGroups: 29 | - nacos.io 30 | apiVersions: 31 | - v1 32 | operations: 33 | - CREATE 34 | - UPDATE 35 | resources: 36 | - dynamicconfigurations 37 | scope: '*' 38 | sideEffects: None 39 | timeoutSeconds: 5 40 | --- 41 | apiVersion: admissionregistration.k8s.io/v1 42 | kind: MutatingWebhookConfiguration 43 | metadata: 44 | annotations: 45 | name: {{ include "nacos-controller.fullname" . }} 46 | webhooks: 47 | - admissionReviewVersions: 48 | - v1 49 | - v1beta1 50 | clientConfig: 51 | caBundle: {{ b64enc $ca.Cert }} 52 | service: 53 | name: {{ include "nacos-controller.fullname" . }} 54 | namespace: {{ .Release.Namespace }} 55 | path: /mutate-nacos-io-v1-dynamicconfiguration 56 | port: 443 57 | failurePolicy: Fail 58 | matchPolicy: Equivalent 59 | name: dc.mutating.nacos.io 60 | namespaceSelector: {} 61 | objectSelector: {} 62 | reinvocationPolicy: Never 63 | rules: 64 | - apiGroups: 65 | - nacos.io 66 | apiVersions: 67 | - v1 68 | operations: 69 | - CREATE 70 | - UPDATE 71 | resources: 72 | - dynamicconfigurations 73 | scope: '*' 74 | sideEffects: None 75 | timeoutSeconds: 5 76 | --- 77 | apiVersion: v1 78 | kind: Secret 79 | metadata: 80 | name: {{ include "nacos-controller.fullname" . 
}} 81 | namespace: {{ .Release.Namespace }} 82 | type: Opaque 83 | data: 84 | tls.crt: {{ b64enc $cert.Cert }} 85 | tls.key: {{ b64enc $cert.Key }} -------------------------------------------------------------------------------- /api/v1/servicediscovery_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | v1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 25 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
26 | 27 | // ServiceDiscoverySpec defines the desired state of ServiceDiscovery 28 | type ServiceDiscoverySpec struct { 29 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 30 | // Important: Run "make" to regenerate code after modifying this file 31 | Services []string `json:"services,omitempty"` 32 | SyncDirect ServiceSyncDirection `json:"syncDirect,omitempty"` 33 | NacosServer NacosServerConfiguration `json:"nacosServer,omitempty"` 34 | ObjectRef *v1.ObjectReference `json:"objectRef,omitempty"` 35 | } 36 | 37 | type ServiceSyncDirection string 38 | 39 | const ( 40 | ToNacos ServiceSyncDirection = "K8s2nacos" 41 | ToK8s ServiceSyncDirection = "nacos2k8s" 42 | ) 43 | 44 | // ServiceDiscoveryStatus defines the observed state of ServiceDiscovery 45 | type ServiceDiscoveryStatus struct { 46 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 47 | // Important: Run "make" to regenerate code after modifying this file 48 | Message string `json:"message,omitempty"` 49 | SyncStatuses map[string]*ServiceSyncStatus `json:"syncStatuses,omitempty"` 50 | ObjectRef *v1.ObjectReference `json:"objectRef,omitempty"` 51 | } 52 | 53 | type ServiceSyncStatus struct { 54 | ServiceName string `json:"serviceName,omitempty"` 55 | GroupName string `json:"groupName,omitempty"` 56 | LastSyncFrom string `json:"lastSyncFrom,omitempty"` 57 | LastSyncTime metav1.Time `json:"lastSyncTime,omitempty"` 58 | Ready bool `json:"ready,omitempty"` 59 | Message string `json:"message,omitempty"` 60 | } 61 | 62 | //+kubebuilder:object:root=true 63 | //+kubebuilder:subresource:status 64 | 65 | // ServiceDiscovery is the Schema for the servicediscoveries API 66 | type ServiceDiscovery struct { 67 | metav1.TypeMeta `json:",inline"` 68 | metav1.ObjectMeta `json:"metadata,omitempty"` 69 | 70 | Spec ServiceDiscoverySpec `json:"spec,omitempty"` 71 | Status ServiceDiscoveryStatus `json:"status,omitempty"` 72 | } 73 | 74 | //+kubebuilder:object:root=true 75 | 76 | // 
ServiceDiscoveryList contains a list of ServiceDiscovery 77 | type ServiceDiscoveryList struct { 78 | metav1.TypeMeta `json:",inline"` 79 | metav1.ListMeta `json:"metadata,omitempty"` 80 | Items []*ServiceDiscovery `json:"items"` 81 | } 82 | 83 | func init() { 84 | SchemeBuilder.Register(&ServiceDiscovery{}, &ServiceDiscoveryList{}) 85 | } 86 | -------------------------------------------------------------------------------- /pkg/nacos/auth/auth_provider.go: -------------------------------------------------------------------------------- 1 | package auth 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | client2 "github.com/nacos-group/nacos-controller/pkg/nacos/client" 8 | 9 | v1 "k8s.io/api/core/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | "k8s.io/apimachinery/pkg/runtime/schema" 12 | "k8s.io/apimachinery/pkg/types" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | const ( 17 | secretAuthKeyAccessKey = "accessKey" 18 | secretAuthKeySecretKey = "secretKey" 19 | secretAuthKeyUsername = "username" 20 | secretAuthKeyPassword = "password" 21 | ) 22 | 23 | var ( 24 | secretGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} 25 | ) 26 | 27 | type NacosClientParam struct { 28 | Endpoint string 29 | ServerAddr string 30 | Namespace string 31 | AuthInfo NacosClientAuthInfo 32 | } 33 | 34 | type NacosClientAuthInfo struct { 35 | AccessKey string 36 | SecretKey string 37 | Username string 38 | Password string 39 | } 40 | 41 | type NacosAuthProvider interface { 42 | GetNacosClientParams(authRef *v1.ObjectReference, nacosServerParam client2.NacosServerParam, namespace string) (*NacosClientParam, error) 43 | } 44 | 45 | type DefaultNacosAuthProvider struct { 46 | Client client.Client 47 | } 48 | 49 | func NewDefaultNacosAuthProvider(c client.Client) NacosAuthProvider { 50 | return &DefaultNacosAuthProvider{Client: c} 51 | } 52 | 53 | func (p *DefaultNacosAuthProvider) GetNacosClientParams(authRef *v1.ObjectReference, nacosServerParam 
client2.NacosServerParam, namespace string) (*NacosClientParam, error) { 54 | var authInfo = &NacosClientAuthInfo{} 55 | if authRef != nil { 56 | authRef = authRef.DeepCopy() 57 | authRef.Namespace = namespace 58 | var err error 59 | authInfo, err = p.getNacosAuthInfo(authRef) 60 | if err != nil { 61 | return nil, err 62 | } 63 | } 64 | if len(nacosServerParam.Endpoint) > 0 { 65 | return &NacosClientParam{ 66 | Endpoint: nacosServerParam.Endpoint, 67 | Namespace: nacosServerParam.Namespace, 68 | AuthInfo: *authInfo, 69 | }, nil 70 | } 71 | if len(nacosServerParam.ServerAddr) > 0 { 72 | return &NacosClientParam{ 73 | ServerAddr: nacosServerParam.ServerAddr, 74 | Namespace: nacosServerParam.Namespace, 75 | AuthInfo: *authInfo, 76 | }, nil 77 | } 78 | return nil, fmt.Errorf("either endpoint or serverAddr should be set") 79 | } 80 | 81 | // getNacosAuthInfo resolves auth credentials from the referenced object; only Secret references are supported. 82 | func (p *DefaultNacosAuthProvider) getNacosAuthInfo(obj *v1.ObjectReference) (*NacosClientAuthInfo, error) { 82 | switch obj.GroupVersionKind().String() { 83 | case secretGVK.String(): 84 | return p.getNacosAuthFromSecret(obj) 85 | default: 86 | return nil, fmt.Errorf("unsupported nacos auth reference type: %s", obj.GroupVersionKind().String()) 87 | } 88 | } 89 | 90 | // getNacosAuthFromSecret reads accessKey/secretKey/username/password from the referenced Secret's data. 90 | // A missing Secret is not an error: empty credentials are returned so unauthenticated servers still work. 90 | func (p *DefaultNacosAuthProvider) getNacosAuthFromSecret(obj *v1.ObjectReference) (*NacosClientAuthInfo, error) { 91 | s := v1.Secret{} 92 | info := NacosClientAuthInfo{} 93 | if err := p.Client.Get(context.TODO(), types.NamespacedName{Namespace: obj.Namespace, Name: obj.Name}, &s); err != nil { 94 | if errors.IsNotFound(err) { 95 | return &info, nil 96 | } 97 | return nil, err 98 | } 99 | if v, ok := s.Data[secretAuthKeyAccessKey]; ok && len(v) > 0 { 100 | info.AccessKey = string(v) 101 | } 102 | if v, ok := s.Data[secretAuthKeySecretKey]; ok && len(v) > 0 { 103 | info.SecretKey = string(v) 104 | } 105 | if v, ok := s.Data[secretAuthKeyUsername]; ok && len(v) > 0 { 106 | info.Username = string(v) 107 | } 108 | if v, ok := s.Data[secretAuthKeyPassword]; ok && len(v) > 0
{ 109 | info.Password = string(v) 110 | } 111 | return &info, nil 112 | } 113 | -------------------------------------------------------------------------------- /pkg/controller/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controller 18 | 19 | import ( 20 | "context" 21 | "github.com/nacos-group/nacos-controller/pkg/nacos" 22 | "github.com/nacos-group/nacos-controller/pkg/nacos/auth" 23 | "github.com/nacos-group/nacos-controller/pkg/nacos/client/impl" 24 | "k8s.io/client-go/kubernetes" 25 | "k8s.io/utils/pointer" 26 | "path/filepath" 27 | ctrl "sigs.k8s.io/controller-runtime" 28 | "sigs.k8s.io/controller-runtime/pkg/client/config" 29 | "testing" 30 | 31 | . "github.com/onsi/ginkgo/v2" 32 | . "github.com/onsi/gomega" 33 | 34 | "k8s.io/client-go/kubernetes/scheme" 35 | "k8s.io/client-go/rest" 36 | "sigs.k8s.io/controller-runtime/pkg/client" 37 | "sigs.k8s.io/controller-runtime/pkg/envtest" 38 | logf "sigs.k8s.io/controller-runtime/pkg/log" 39 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 40 | 41 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 42 | //+kubebuilder:scaffold:imports 43 | ) 44 | 45 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 46 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
47 | 48 | var cfg *rest.Config 49 | var k8sClient client.Client 50 | var testEnv *envtest.Environment 51 | var ctx context.Context 52 | var cancel context.CancelFunc 53 | 54 | func TestControllers(t *testing.T) { 55 | RegisterFailHandler(Fail) 56 | 57 | RunSpecs(t, "Controller Suite") 58 | } 59 | 60 | var _ = BeforeSuite(func() { 61 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 62 | 63 | By("bootstrapping test environment") 64 | testEnv = &envtest.Environment{ 65 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 66 | ErrorIfCRDPathMissing: true, 67 | Config: config.GetConfigOrDie(), 68 | UseExistingCluster: pointer.Bool(true), 69 | } 70 | 71 | var err error 72 | // cfg is defined in this file globally. 73 | cfg, err = testEnv.Start() 74 | Expect(err).NotTo(HaveOccurred()) 75 | Expect(cfg).NotTo(BeNil()) 76 | 77 | err = nacosiov1.AddToScheme(scheme.Scheme) 78 | Expect(err).NotTo(HaveOccurred()) 79 | ctx, cancel = context.WithCancel(context.Background()) 80 | 81 | k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ 82 | Scheme: scheme.Scheme, 83 | }) 84 | Expect(err).ToNot(HaveOccurred()) 85 | 86 | clientSet := kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie()) 87 | 88 | err = NewDynamicConfigurationReconciler(k8sManager.GetClient(), clientSet, nacos.SyncConfigOptions{ 89 | ConfigClient: impl.NewDefaultNacosConfigClient(auth.NewDefaultNacosAuthProvider(k8sManager.GetClient())), 90 | }).SetupWithManager(k8sManager) 91 | Expect(err).ToNot(HaveOccurred()) 92 | 93 | //+kubebuilder:scaffold:scheme 94 | 95 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 96 | Expect(err).NotTo(HaveOccurred()) 97 | Expect(k8sClient).NotTo(BeNil()) 98 | 99 | go func() { 99 | // Recover inside the goroutine so a failed Ginkgo assertion fails the spec 99 | // instead of panicking the process (matches webhook_suite_test.go). 100 | defer GinkgoRecover() 100 | err = k8sManager.Start(ctx) 101 | Expect(err).ToNot(HaveOccurred()) 102 | }() 103 | }) 104 | 105 | var _ = AfterSuite(func() { 106 | By("tearing down the test environment") 107 | cancel() 108 | err := testEnv.Stop() 109 | 
Expect(err).NotTo(HaveOccurred()) 110 | }) 111 | -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker 2 | 3 | # This workflow uses actions that are not certified by GitHub. 4 | # They are provided by a third-party and are governed by 5 | # separate terms of service, privacy policy, and support 6 | # documentation. 7 | 8 | on: 9 | # schedule: 10 | # - cron: '22 16 * * *' 11 | push: 12 | branches: [ "main" ] 13 | # Publish semver tags as releases. 14 | tags: [ 'v*.*.*' ] 15 | 16 | env: 17 | # Use docker.io for Docker Hub if empty 18 | REGISTRY: docker.io 19 | # github.repository as / 20 | IMAGE_NAME: nacos/nacos-controller 21 | 22 | 23 | jobs: 24 | build: 25 | 26 | runs-on: ubuntu-latest 27 | permissions: 28 | contents: read 29 | packages: write 30 | # This is used to complete the identity challenge 31 | # with sigstore/fulcio when running outside of PRs. 
32 | id-token: write 33 | 34 | steps: 35 | - name: Set up Go 1.x 36 | uses: actions/setup-go@v2 37 | with: 38 | go-version: 1.19 39 | 40 | - name: Checkout repository 41 | uses: actions/checkout@v3 42 | 43 | # Install the cosign tool except on PR 44 | # https://github.com/sigstore/cosign-installer 45 | - name: Install cosign 46 | uses: sigstore/cosign-installer@v3.8.1 #v3.1.1 47 | with: 48 | cosign-release: 'v2.4.3' 49 | 50 | # Set up BuildKit Docker container builder to be able to build 51 | # multi-platform images and export cache 52 | # https://github.com/docker/setup-buildx-action 53 | - name: Set up Docker Buildx 54 | uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 55 | 56 | # Login against a Docker registry except on PR 57 | # https://github.com/docker/login-action 58 | - name: Log into registry ${{ env.REGISTRY }} 59 | uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 60 | with: 61 | registry: ${{ env.REGISTRY }} 62 | username: ${{ secrets.DOCKER_USERNAME }} 63 | password: ${{ secrets.DOCKER_PASSWORD }} 64 | 65 | # Extract metadata (tags, labels) for Docker 66 | # https://github.com/docker/metadata-action 67 | - name: Extract Docker metadata 68 | id: meta 69 | uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 # v5.0.0 70 | with: 71 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 72 | 73 | # Build and push Docker image with Buildx (don't push on PR) 74 | # https://github.com/docker/build-push-action 75 | - name: Build and push Docker image 76 | id: build-and-push 77 | uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # v5.0.0 78 | with: 79 | context: . 80 | push: ${{ github.event_name != 'pull_request' }} 81 | tags: ${{ steps.meta.outputs.tags }} 82 | labels: ${{ steps.meta.outputs.labels }} 83 | cache-from: type=gha 84 | cache-to: type=gha,mode=max 85 | 86 | # Sign the resulting Docker image digest except on PRs. 
87 | # This will only write to the public Rekor transparency log when the Docker 88 | # repository is public to avoid leaking data. If you would like to publish 89 | # transparency data even for private images, pass --force to cosign below. 90 | # https://github.com/sigstore/cosign 91 | - name: Sign the published Docker image 92 | env: 93 | # https://docs.github.com/en/actions/security-guides/security-hardening-for-github-actions#using-an-intermediate-environment-variable 94 | TAGS: ${{ steps.meta.outputs.tags }} 95 | DIGEST: ${{ steps.build-and-push.outputs.digest }} 96 | # This step uses the identity token to provision an ephemeral certificate 97 | # against the sigstore community Fulcio instance. 98 | run: echo "${TAGS}" | xargs -I {} cosign sign --yes {}@${DIGEST} 99 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: namespace 7 | app.kubernetes.io/instance: system 8 | app.kubernetes.io/component: manager 9 | app.kubernetes.io/created-by: nacos-controller 10 | app.kubernetes.io/part-of: nacos-controller 11 | app.kubernetes.io/managed-by: kustomize 12 | name: system 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: controller-manager 18 | namespace: system 19 | labels: 20 | control-plane: controller-manager 21 | app.kubernetes.io/name: deployment 22 | app.kubernetes.io/instance: controller-manager 23 | app.kubernetes.io/component: manager 24 | app.kubernetes.io/created-by: nacos-controller 25 | app.kubernetes.io/part-of: nacos-controller 26 | app.kubernetes.io/managed-by: kustomize 27 | spec: 28 | selector: 29 | matchLabels: 30 | control-plane: controller-manager 31 | replicas: 1 32 | template: 33 | metadata: 34 | annotations: 35 | 
kubectl.kubernetes.io/default-container: manager 36 | labels: 37 | control-plane: controller-manager 38 | spec: 39 | # TODO(user): Uncomment the following code to configure the nodeAffinity expression 40 | # according to the platforms which are supported by your solution. 41 | # It is considered best practice to support multiple architectures. You can 42 | # build your manager image using the makefile target docker-buildx. 43 | # affinity: 44 | # nodeAffinity: 45 | # requiredDuringSchedulingIgnoredDuringExecution: 46 | # nodeSelectorTerms: 47 | # - matchExpressions: 48 | # - key: kubernetes.io/arch 49 | # operator: In 50 | # values: 51 | # - amd64 52 | # - arm64 53 | # - ppc64le 54 | # - s390x 55 | # - key: kubernetes.io/os 56 | # operator: In 57 | # values: 58 | # - linux 59 | securityContext: 60 | runAsNonRoot: true 61 | # TODO(user): For common cases that do not require escalating privileges 62 | # it is recommended to ensure that all your Pods/Containers are restrictive. 63 | # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted 64 | # Please uncomment the following code if your project does NOT have to work on old Kubernetes 65 | # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 66 | # seccompProfile: 67 | # type: RuntimeDefault 68 | containers: 69 | - command: 70 | - /manager 71 | args: 72 | - --leader-elect 73 | image: controller:latest 74 | name: manager 75 | securityContext: 76 | allowPrivilegeEscalation: false 77 | capabilities: 78 | drop: 79 | - "ALL" 80 | livenessProbe: 81 | httpGet: 82 | path: /healthz 83 | port: 8081 84 | initialDelaySeconds: 15 85 | periodSeconds: 20 86 | readinessProbe: 87 | httpGet: 88 | path: /readyz 89 | port: 8081 90 | initialDelaySeconds: 5 91 | periodSeconds: 10 92 | # TODO(user): Configure the resources accordingly based on the project requirements. 
93 | # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 94 | resources: 95 | limits: 96 | cpu: 500m 97 | memory: 128Mi 98 | requests: 99 | cpu: 10m 100 | memory: 64Mi 101 | serviceAccountName: controller-manager 102 | terminationGracePeriodSeconds: 10 103 | -------------------------------------------------------------------------------- /pkg/nacos/naming/convert_util.go: -------------------------------------------------------------------------------- 1 | package naming 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | 9 | "github.com/nacos-group/nacos-sdk-go/v2/common/logger" 10 | v1 "k8s.io/api/core/v1" 11 | ) 12 | 13 | const ( 14 | // annotationServiceSync is the key of the annotation that determines 15 | // whether to sync the Service resource or not. 16 | annotationServiceSync = "nacos.io/service-sync" 17 | 18 | // annotationServiceName is set to override the name of the service 19 | // registered. 20 | annotationServiceName = "nacos.io/service-name" 21 | 22 | // annotationServiceGroup is set to override the group of the service 23 | // registered. 24 | annotationServiceGroup = "nacos.io/service-group" 25 | 26 | // annotationServicePort specifies the port to use as the service instance 27 | // port when registering a service. This can be a named port in the 28 | // service or an integer value. 29 | annotationServicePort = "nacos.io/service-port" 30 | 31 | // annotationServiceMeta specifies the meta of nacos service. 32 | // The format must be json. 
33 | annotationServiceMeta = "nacos.io/service-meta" 34 | ) 35 | 36 | func ShouldServiceSync(svc *v1.Service) bool { 37 | raw, ok := svc.Annotations[annotationServiceSync] 38 | if !ok { 39 | return false 40 | } 41 | 42 | v, err := strconv.ParseBool(raw) 43 | if err != nil { 44 | return false 45 | } 46 | 47 | return v 48 | } 49 | 50 | // ConvertToAddresses flattens the Endpoints subsets into nacos instance addresses, 50 | // preferring the explicitly annotated service port over the subset's first port. 50 | func ConvertToAddresses(endpoints *v1.Endpoints, serviceInfo ServiceInfo) []Address { 51 | addresses := make([]Address, 0) 52 | bytes, _ := json.Marshal(endpoints) 53 | 54 | fmt.Println("endpoints: ", string(bytes)) 55 | for _, subset := range endpoints.Subsets { 56 | for _, address := range subset.Addresses { 57 | if serviceInfo.Port > 0 { 58 | addresses = append(addresses, Address{ 59 | IP: address.IP, 60 | Port: uint64(serviceInfo.Port), 61 | }) 62 | } else if len(subset.Ports) > 0 { // guard: a subset may carry no ports; indexing Ports[0] would panic 63 | addresses = append(addresses, Address{ 64 | IP: address.IP, 65 | Port: uint64(subset.Ports[0].Port), 66 | }) 67 | } 68 | } 69 | } 70 | bytes, _ = json.Marshal(addresses) 71 | fmt.Println("address: ", string(bytes)) 72 | return addresses 73 | } 74 | 75 | func GetEndpointPort(ep *v1.EndpointPort) { 76 | logger.Info("Get endpoint port") 77 | } 78 | 79 | func GenerateServiceInfo(svc *v1.Service) (ServiceInfo, error) { 80 | serviceName := svc.Annotations[annotationServiceName] 81 | if serviceName == "" { 82 | // fall back to get the name of service resource 83 | logger.Info("The service name annotation is empty, so we use the name of service resource.") 84 | serviceName = svc.Name 85 | } 86 | 87 | port, err := strconv.ParseUint(svc.Annotations[annotationServicePort], 0, 0) 88 | if err != nil { 89 | logger.Info("Failed to parse the service's port, caused: " + err.Error()) 90 | port = 0 91 | } 92 | 93 | meta := make(map[string]string) 94 | rawMeta := svc.Annotations[annotationServiceMeta] 95 | if rawMeta != "" { 96 | if err := json.Unmarshal([]byte(svc.Annotations[annotationServiceMeta]), &meta); err != nil { 97 | logger.Info("Failed to parse the service's meta, caused: " + 
err.Error() + ", raw meta: " + rawMeta) 98 | return ServiceInfo{}, err 99 | } 100 | } 101 | 102 | for k, v := range svc.Annotations { 103 | if !strings.HasPrefix(k, "nacos.io/") { 104 | meta[k] = v 105 | } 106 | } 107 | 108 | // We need to mark the service is synced by nacos controller 109 | meta[NamingSyncedMark] = "true" 110 | 111 | groupName := svc.Annotations[annotationServiceGroup] 112 | 113 | if groupName == "" { 114 | groupName = NamingDefaultGroupName 115 | } 116 | 117 | // Now we only trust the annotations. 118 | // TODO Extract value from the spec of service resource for extended features 119 | return ServiceInfo{ 120 | ServiceKey: ServiceKey{ 121 | ServiceName: serviceName, 122 | Group: groupName, 123 | }, 124 | Port: port, 125 | Metadata: meta, 126 | }, nil 127 | } 128 | -------------------------------------------------------------------------------- /api/v1/webhook_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | "context" 21 | "crypto/tls" 22 | "fmt" 23 | "net" 24 | "path/filepath" 25 | "testing" 26 | "time" 27 | 28 | . "github.com/onsi/ginkgo/v2" 29 | . 
"github.com/onsi/gomega" 30 | 31 | admissionv1 "k8s.io/api/admission/v1" 32 | //+kubebuilder:scaffold:imports 33 | "k8s.io/apimachinery/pkg/runtime" 34 | "k8s.io/client-go/rest" 35 | ctrl "sigs.k8s.io/controller-runtime" 36 | "sigs.k8s.io/controller-runtime/pkg/client" 37 | "sigs.k8s.io/controller-runtime/pkg/envtest" 38 | logf "sigs.k8s.io/controller-runtime/pkg/log" 39 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 40 | ) 41 | 42 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 43 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 44 | 45 | var cfg *rest.Config 46 | var k8sClient client.Client 47 | var testEnv *envtest.Environment 48 | var ctx context.Context 49 | var cancel context.CancelFunc 50 | 51 | func TestAPIs(t *testing.T) { 52 | RegisterFailHandler(Fail) 53 | 54 | RunSpecs(t, "Webhook Suite") 55 | } 56 | 57 | var _ = BeforeSuite(func() { 58 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 59 | 60 | ctx, cancel = context.WithCancel(context.TODO()) 61 | 62 | By("bootstrapping test environment") 63 | testEnv = &envtest.Environment{ 64 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 65 | ErrorIfCRDPathMissing: false, 66 | WebhookInstallOptions: envtest.WebhookInstallOptions{ 67 | Paths: []string{filepath.Join("..", "..", "config", "webhook")}, 68 | }, 69 | } 70 | 71 | var err error 72 | // cfg is defined in this file globally. 
73 | cfg, err = testEnv.Start() 74 | Expect(err).NotTo(HaveOccurred()) 75 | Expect(cfg).NotTo(BeNil()) 76 | 77 | scheme := runtime.NewScheme() 78 | err = AddToScheme(scheme) 79 | Expect(err).NotTo(HaveOccurred()) 80 | 81 | err = admissionv1.AddToScheme(scheme) 82 | Expect(err).NotTo(HaveOccurred()) 83 | 84 | //+kubebuilder:scaffold:scheme 85 | 86 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) 87 | Expect(err).NotTo(HaveOccurred()) 88 | Expect(k8sClient).NotTo(BeNil()) 89 | 90 | // start webhook server using Manager 91 | webhookInstallOptions := &testEnv.WebhookInstallOptions 92 | mgr, err := ctrl.NewManager(cfg, ctrl.Options{ 93 | Scheme: scheme, 94 | Host: webhookInstallOptions.LocalServingHost, 95 | Port: webhookInstallOptions.LocalServingPort, 96 | CertDir: webhookInstallOptions.LocalServingCertDir, 97 | LeaderElection: false, 98 | MetricsBindAddress: "0", 99 | }) 100 | Expect(err).NotTo(HaveOccurred()) 101 | 102 | err = (&DynamicConfiguration{}).SetupWebhookWithManager(mgr) 103 | Expect(err).NotTo(HaveOccurred()) 104 | 105 | //+kubebuilder:scaffold:webhook 106 | 107 | go func() { 108 | defer GinkgoRecover() 109 | err = mgr.Start(ctx) 110 | Expect(err).NotTo(HaveOccurred()) 111 | }() 112 | 113 | // wait for the webhook server to get ready 114 | dialer := &net.Dialer{Timeout: time.Second} 115 | addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) 116 | Eventually(func() error { 117 | conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) 118 | if err != nil { 119 | return err 120 | } 121 | conn.Close() 122 | return nil 123 | }).Should(Succeed()) 124 | 125 | }) 126 | 127 | var _ = AfterSuite(func() { 128 | cancel() 129 | By("tearing down the test environment") 130 | err := testEnv.Stop() 131 | Expect(err).NotTo(HaveOccurred()) 132 | }) 133 | -------------------------------------------------------------------------------- 
/pkg/controller/secret_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controller 18 | 19 | import ( 20 | "context" 21 | "fmt" 22 | 23 | "github.com/nacos-group/nacos-controller/pkg" 24 | "github.com/nacos-group/nacos-controller/pkg/nacos" 25 | "k8s.io/apimachinery/pkg/api/errors" 26 | "k8s.io/client-go/kubernetes" 27 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 28 | 29 | corev1 "k8s.io/api/core/v1" 30 | "k8s.io/apimachinery/pkg/runtime" 31 | ctrl "sigs.k8s.io/controller-runtime" 32 | "sigs.k8s.io/controller-runtime/pkg/client" 33 | "sigs.k8s.io/controller-runtime/pkg/log" 34 | ) 35 | 36 | // SecretReconciler reconciles a Secret object 37 | type SecretReconciler struct { 38 | client.Client 39 | controller *nacos.ConfigurationSyncController 40 | Scheme *runtime.Scheme 41 | } 42 | 43 | func NewSecretReconciler(c client.Client, cs *kubernetes.Clientset, opt nacos.SyncConfigOptions, scheme *runtime.Scheme) *SecretReconciler { 44 | return &SecretReconciler{ 45 | Client: c, 46 | controller: nacos.NewConfigurationSyncController(c, cs, opt), 47 | Scheme: scheme, 48 | } 49 | } 50 | 51 | // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete 52 | // +kubebuilder:rbac:groups=core,resources=secrets/status,verbs=get;update;patch 53 | // 
+kubebuilder:rbac:groups=core,resources=secrets/finalizers,verbs=update 54 | 55 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 56 | // move the current state of the cluster closer to the desired state. 57 | // TODO(user): Modify the Reconcile function to compare the state specified by 58 | // the Secret object against the actual cluster state, and then 59 | // perform operations to make the cluster state reflect the state specified by 60 | // the user. 61 | // 62 | // For more details, check Reconcile and its Result here: 63 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile 64 | func (r *SecretReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 65 | l := log.FromContext(ctx) 66 | defer func() { 67 | if r := recover(); r != nil { 68 | err := fmt.Errorf("panic occurred") 69 | l.Error(err, "panic", "req", req, "recover", r) 70 | } 71 | }() 72 | 73 | secret := corev1.Secret{} 74 | if err := r.Get(ctx, req.NamespacedName, &secret); err != nil { 75 | if errors.IsNotFound(err) { 76 | return ctrl.Result{}, nil 77 | } 78 | l.Error(err, "get Secret error") 79 | return ctrl.Result{}, err 80 | } 81 | if secret.DeletionTimestamp != nil { 82 | return ctrl.Result{}, r.doFinalization(ctx, &secret) 83 | } 84 | if err := r.ensureFinalizer(ctx, &secret); err != nil { 85 | if errors.IsConflict(err) { 86 | return ctrl.Result{Requeue: true}, nil 87 | } 88 | return ctrl.Result{}, err 89 | } 90 | if err := r.controller.DoReconcile(ctx, &secret); err != nil { 91 | l.Error(err, "doReconcile error") 92 | return ctrl.Result{}, err 93 | } 94 | if err := r.Update(ctx, &secret); err != nil { 95 | if errors.IsConflict(err) { 96 | return ctrl.Result{Requeue: true}, nil 97 | } 98 | return ctrl.Result{}, err 99 | } 100 | return ctrl.Result{}, nil 101 | } 102 | 103 | // SetupWithManager sets up the controller with the Manager. 
104 | func (r *SecretReconciler) SetupWithManager(mgr ctrl.Manager) error { 105 | return ctrl.NewControllerManagedBy(mgr). 106 | For(&corev1.Secret{}). 107 | Named("secret"). 108 | Complete(r) 109 | } 110 | 111 | func (r *SecretReconciler) doFinalization(ctx context.Context, secret *corev1.Secret) error { 112 | if !pkg.Contains(secret.GetFinalizers(), pkg.FinalizerName) { 113 | return nil 114 | } 115 | l := log.FromContext(ctx, "stage", "secret finalize") 116 | l.Info("doFinalization", "secret", secret) 117 | if err := r.controller.Finalize(ctx, secret); err != nil { 118 | l.Error(err, "secret finalize error") 119 | return err 120 | } 121 | l.Info("Remove finalizer", "secret", secret) 122 | secret.SetFinalizers(pkg.Remove(secret.GetFinalizers(), pkg.FinalizerName)) 123 | if err := r.Update(ctx, secret); err != nil { 124 | l.Error(err, "secret finalize error") 125 | return err 126 | } 127 | return nil 128 | } 129 | 130 | func (r *SecretReconciler) ensureFinalizer(ctx context.Context, object client.Object) error { 131 | if controllerutil.ContainsFinalizer(object, pkg.FinalizerName) { 132 | return nil 133 | } 134 | l := log.FromContext(ctx) 135 | controllerutil.AddFinalizer(object, pkg.FinalizerName) 136 | if err := r.Update(ctx, object); err != nil { 137 | l.Error(err, "add ConfigMap finalizer error") 138 | return err 139 | } 140 | return nil 141 | } 142 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: nacos-controller-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 
9 | namePrefix: nacos-controller- 10 | 11 | # Labels to add to all resources and selectors. 12 | #labels: 13 | #- includeSelectors: true 14 | # pairs: 15 | # someName: someValue 16 | 17 | resources: 18 | - ../crd 19 | - ../rbac 20 | - ../manager 21 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 22 | # crd/kustomization.yaml 23 | #- ../webhook 24 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 25 | #- ../certmanager 26 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 27 | #- ../prometheus 28 | 29 | patchesStrategicMerge: 30 | # Protect the /metrics endpoint by putting it behind auth. 31 | # If you want your controller-manager to expose the /metrics 32 | # endpoint w/o any authn/z, please comment the following line. 33 | - manager_auth_proxy_patch.yaml 34 | 35 | 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
47 | # Uncomment the following replacements to add the cert-manager CA injection annotations 48 | #replacements: 49 | # - source: # Add cert-manager annotation to ValidatingWebhookConfiguration, MutatingWebhookConfiguration and CRDs 50 | # kind: Certificate 51 | # group: cert-manager.io 52 | # version: v1 53 | # name: serving-cert # this name should match the one in certificate.yaml 54 | # fieldPath: .metadata.namespace # namespace of the certificate CR 55 | # targets: 56 | # - select: 57 | # kind: ValidatingWebhookConfiguration 58 | # fieldPaths: 59 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 60 | # options: 61 | # delimiter: '/' 62 | # index: 0 63 | # create: true 64 | # - select: 65 | # kind: MutatingWebhookConfiguration 66 | # fieldPaths: 67 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 68 | # options: 69 | # delimiter: '/' 70 | # index: 0 71 | # create: true 72 | # - select: 73 | # kind: CustomResourceDefinition 74 | # fieldPaths: 75 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 76 | # options: 77 | # delimiter: '/' 78 | # index: 0 79 | # create: true 80 | # - source: 81 | # kind: Certificate 82 | # group: cert-manager.io 83 | # version: v1 84 | # name: serving-cert # this name should match the one in certificate.yaml 85 | # fieldPath: .metadata.name 86 | # targets: 87 | # - select: 88 | # kind: ValidatingWebhookConfiguration 89 | # fieldPaths: 90 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 91 | # options: 92 | # delimiter: '/' 93 | # index: 1 94 | # create: true 95 | # - select: 96 | # kind: MutatingWebhookConfiguration 97 | # fieldPaths: 98 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 99 | # options: 100 | # delimiter: '/' 101 | # index: 1 102 | # create: true 103 | # - select: 104 | # kind: CustomResourceDefinition 105 | # fieldPaths: 106 | # - .metadata.annotations.[cert-manager.io/inject-ca-from] 107 | # options: 108 | # delimiter: '/' 109 | # index: 1 110 | # create: true 111 
| # - source: # Add cert-manager annotation to the webhook Service 112 | # kind: Service 113 | # version: v1 114 | # name: webhook-service 115 | # fieldPath: .metadata.name # namespace of the service 116 | # targets: 117 | # - select: 118 | # kind: Certificate 119 | # group: cert-manager.io 120 | # version: v1 121 | # fieldPaths: 122 | # - .spec.dnsNames.0 123 | # - .spec.dnsNames.1 124 | # options: 125 | # delimiter: '.' 126 | # index: 0 127 | # create: true 128 | # - source: 129 | # kind: Service 130 | # version: v1 131 | # name: webhook-service 132 | # fieldPath: .metadata.namespace # namespace of the service 133 | # targets: 134 | # - select: 135 | # kind: Certificate 136 | # group: cert-manager.io 137 | # version: v1 138 | # fieldPaths: 139 | # - .spec.dnsNames.0 140 | # - .spec.dnsNames.1 141 | # options: 142 | # delimiter: '.' 143 | # index: 1 144 | # create: true 145 | -------------------------------------------------------------------------------- /api/v1/dynamicconfiguration_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | v1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 25 | // NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. 26 | 27 | // DynamicConfigurationSpec defines the desired state of DynamicConfiguration 28 | type DynamicConfigurationSpec struct { 29 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 30 | // Important: Run "make" to regenerate code after modifying this file 31 | AdditionalConf *AdditionalConfiguration `json:"additionalConf,omitempty"` 32 | Strategy SyncStrategy `json:"strategy,omitempty"` 33 | NacosServer NacosServerConfiguration `json:"nacosServer,omitempty"` 34 | ObjectRefs []*v1.ObjectReference `json:"objectRefs,omitempty"` 35 | } 36 | 37 | // DynamicConfigurationStatus defines the observed state of DynamicConfiguration 38 | type DynamicConfigurationStatus struct { 39 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 40 | // Important: Run "make" to regenerate code after modifying this file 41 | Phase string `json:"phase,omitempty"` 42 | Message string `json:"message,omitempty"` 43 | ObservedGeneration int64 `json:"observedGeneration,omitempty"` 44 | SyncStatuses map[string][]SyncStatus `json:"syncStatuses,omitempty"` 45 | ListenConfigs map[string][]string `json:"listenConfigs,omitempty"` 46 | NacosServerStatus NacosServerStatus `json:"nacosServerStatus,omitempty"` 47 | SyncStrategyStatus SyncStrategy `json:"syncStrategyStatus,omitempty"` 48 | } 49 | 50 | //+kubebuilder:object:root=true 51 | //+kubebuilder:resource:shortName=dc 52 | //+kubebuilder:subresource:status 53 | //+kubebuilder:printcolumn:name="Phase",type=string,JSONPath=`.status.phase` 54 | //+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 55 | 56 | // DynamicConfiguration is the Schema for the dynamicconfigurations API 57 | type DynamicConfiguration struct { 58 | metav1.TypeMeta `json:",inline"` 59 | metav1.ObjectMeta `json:"metadata,omitempty"` 60 | 61 | Spec DynamicConfigurationSpec `json:"spec,omitempty"` 62 | Status DynamicConfigurationStatus 
`json:"status,omitempty"` 63 | } 64 | 65 | type AdditionalConfiguration struct { 66 | Labels map[string]string `json:"labels,omitempty"` 67 | Properties map[string]string `json:"properties,omitempty"` 68 | Tags map[string]string `json:"tags,omitempty"` 69 | } 70 | 71 | type SyncStrategy struct { 72 | SyncScope DynamicConfigurationSyncScope `json:"scope,omitempty"` 73 | //+kubebuilder:default=false 74 | SyncDeletion bool `json:"syncDeletion,omitempty"` 75 | ConflictPolicy DynamicConfigurationSyncConflictPolicy `json:"conflictPolicy,omitempty"` 76 | } 77 | 78 | type DynamicConfigurationSyncConflictPolicy string 79 | 80 | const ( 81 | PreferCluster DynamicConfigurationSyncConflictPolicy = "preferCluster" 82 | PreferServer DynamicConfigurationSyncConflictPolicy = "preferServer" 83 | ) 84 | 85 | type DynamicConfigurationSyncScope string 86 | 87 | const ( 88 | SyncScopePartial DynamicConfigurationSyncScope = "partial" 89 | SyncScopeFull DynamicConfigurationSyncScope = "full" 90 | ) 91 | 92 | type NacosServerConfiguration struct { 93 | Endpoint string `json:"endpoint,omitempty"` 94 | ServerAddr string `json:"serverAddr,omitempty"` 95 | Namespace string `json:"namespace,omitempty"` 96 | AuthRef *v1.ObjectReference `json:"authRef,omitempty"` 97 | } 98 | 99 | type NacosServerStatus struct { 100 | Endpoint string `json:"endpoint,omitempty"` 101 | ServerAddr string `json:"serverAddr,omitempty"` 102 | Namespace string `json:"namespace,omitempty"` 103 | } 104 | 105 | type SyncStatus struct { 106 | DataId string `json:"dataId,omitempty"` 107 | LastSyncTime metav1.Time `json:"lastSyncTime,omitempty"` 108 | LastSyncFrom string `json:"lastSyncFrom,omitempty"` 109 | Md5 string `json:"md5,omitempty"` 110 | Ready bool `json:"ready,omitempty"` 111 | Message string `json:"message,omitempty"` 112 | } 113 | 114 | //+kubebuilder:object:root=true 115 | 116 | // DynamicConfigurationList contains a list of DynamicConfiguration 117 | type DynamicConfigurationList struct { 118 | metav1.TypeMeta 
`json:",inline"` 119 | metav1.ListMeta `json:"metadata,omitempty"` 120 | Items []DynamicConfiguration `json:"items"` 121 | } 122 | 123 | func init() { 124 | SchemeBuilder.Register(&DynamicConfiguration{}, &DynamicConfigurationList{}) 125 | } 126 | -------------------------------------------------------------------------------- /pkg/nacos/server2cluster.go: -------------------------------------------------------------------------------- 1 | package nacos 2 | 3 | import ( 4 | "context" 5 | "sync" 6 | "time" 7 | 8 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 9 | v1 "k8s.io/api/core/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/types" 13 | "k8s.io/apimachinery/pkg/util/wait" 14 | "k8s.io/client-go/kubernetes" 15 | "k8s.io/client-go/util/retry" 16 | "sigs.k8s.io/controller-runtime/pkg/client" 17 | "sigs.k8s.io/controller-runtime/pkg/log" 18 | ) 19 | 20 | type Server2ClusterCallback interface { 21 | Callback(namespace, group, dataId, content string) 22 | CallbackWithContext(ctx context.Context, namespace, group, dataId, content string) 23 | } 24 | 25 | func NewDefaultServer2ClusterCallback(c client.Client, cs *kubernetes.Clientset, locks *LockManager, dcKey types.NamespacedName, objectRef v1.ObjectReference) Server2ClusterCallback { 26 | return &DefaultServer2ClusterCallback{ 27 | Client: c, 28 | cs: cs, 29 | locks: locks, 30 | dcKey: dcKey, 31 | objectRef: objectRef, 32 | } 33 | } 34 | 35 | type DefaultServer2ClusterCallback struct { 36 | Client client.Client 37 | cs *kubernetes.Clientset 38 | locks *LockManager 39 | dcKey types.NamespacedName 40 | objectRef v1.ObjectReference 41 | } 42 | 43 | func (cb *DefaultServer2ClusterCallback) Callback(namespace, group, dataId, content string) { 44 | cb.CallbackWithContext(context.Background(), namespace, group, dataId, content) 45 | } 46 | 47 | func (cb *DefaultServer2ClusterCallback) CallbackWithContext(ctx context.Context, 
namespace, group, dataId, content string) { 48 | l := log.FromContext(ctx, "dynamicConfiguration", cb.dcKey.String(), "namespace", namespace, "group", group, "dataId", dataId, "type", "listenCallBack") 49 | l.Info("server2cluster callback for " + cb.dcKey.String()) 50 | if err := retry.RetryOnConflict(wait.Backoff{ 51 | Duration: 1 * time.Second, 52 | Factor: 2, 53 | Steps: 3, 54 | }, func() error { 55 | lockName := cb.dcKey.String() 56 | lock := cb.locks.GetLock(lockName) 57 | lock.Lock() 58 | defer lock.Unlock() 59 | return cb.syncConfigToLocal(ctx, namespace, group, dataId, content) 60 | }); err != nil { 61 | l.Error(err, "update config failed", "dc", cb.dcKey) 62 | } 63 | l.Info("server2cluster callback processed," + cb.dcKey.String()) 64 | return 65 | } 66 | 67 | func (cb *DefaultServer2ClusterCallback) syncConfigToLocal(ctx context.Context, namespace, group, dataId, content string) error { 68 | l := log.FromContext(ctx) 69 | l = l.WithValues("dynamicConfiguration", cb.dcKey.String(), "namespace", namespace, "group", group, "dataId", dataId, "type", "listenCallBack") 70 | dc := nacosiov1.DynamicConfiguration{} 71 | if err := cb.Client.Get(ctx, cb.dcKey, &dc); err != nil { 72 | if errors.IsNotFound(err) { 73 | l.Info("DynamicConfiguration not found") 74 | return nil 75 | } 76 | l.Error(err, "get DynamicConfiguration error") 77 | return err 78 | } 79 | objWrapper, err := NewObjectReferenceWrapper(cb.cs, &dc, &cb.objectRef) 80 | if err != nil { 81 | l.Error(err, "create object reference wrapper error", "obj", cb.objectRef) 82 | return err 83 | } 84 | oldContent, _, err := objWrapper.GetContent(dataId) 85 | if err != nil { 86 | l.Error(err, "read content error") 87 | return err 88 | } 89 | if len(content) == 0 && dc.Spec.Strategy.SyncDeletion == false { 90 | l.Info("ignored due to syncDeletion is false", "dc", dc.Name) 91 | UpdateSyncStatusIfAbsent(&dc, group, dataId, "", "server", metav1.Now(), true, "skipped due to syncDeletion is false") 92 | return 
cb.Client.Status().Update(ctx, &dc) 93 | } 94 | newMd5 := CalcMd5(content) 95 | if newMd5 == CalcMd5(oldContent) { 96 | l.Info("ignored due to same content", "md5", newMd5) 97 | UpdateSyncStatusIfAbsent(&dc, group, dataId, newMd5, "server", metav1.Now(), true, "skipped due to same md5") 98 | return nil 99 | } 100 | if err := objWrapper.StoreContent(dataId, content); err != nil { 101 | l.Error(err, "update content error", "obj", cb.objectRef) 102 | return err 103 | } 104 | l.Info("update content success", "newContent", content, "oldContent", oldContent) 105 | UpdateSyncStatus(&dc, group, dataId, newMd5, "server", metav1.Now(), true, "") 106 | if err := cb.Client.Status().Update(ctx, &dc); err != nil { 107 | l.Error(err, "update status error") 108 | return err 109 | } 110 | if err := objWrapper.Flush(); err != nil { 111 | l.Error(err, "flush object reference error") 112 | return err 113 | } 114 | return nil 115 | } 116 | 117 | type LockManager struct { 118 | locks map[string]*sync.Mutex 119 | lock sync.RWMutex 120 | } 121 | 122 | func NewLockManager() *LockManager { 123 | return &LockManager{ 124 | locks: map[string]*sync.Mutex{}, 125 | lock: sync.RWMutex{}, 126 | } 127 | } 128 | 129 | func (lm *LockManager) GetLock(key string) *sync.Mutex { 130 | if !lm.HasLock(key) { 131 | lm.lock.Lock() 132 | defer lm.lock.Unlock() 133 | lm.locks[key] = &sync.Mutex{} 134 | return lm.locks[key] 135 | } 136 | lm.lock.RLock() 137 | defer lm.lock.RUnlock() 138 | return lm.locks[key] 139 | } 140 | 141 | func (lm *LockManager) HasLock(key string) bool { 142 | lm.lock.RLock() 143 | defer lm.lock.RUnlock() 144 | 145 | _, ok := lm.locks[key] 146 | return ok 147 | } 148 | 149 | func (lm *LockManager) DelLock(key string) { 150 | if !lm.HasLock(key) { 151 | return 152 | } 153 | lm.lock.Lock() 154 | defer lm.lock.Unlock() 155 | delete(lm.locks, key) 156 | } 157 | -------------------------------------------------------------------------------- /go.mod: 
-------------------------------------------------------------------------------- 1 | module github.com/nacos-group/nacos-controller 2 | 3 | go 1.19 4 | 5 | 6 | require ( 7 | github.com/onsi/ginkgo/v2 v2.9.5 8 | github.com/onsi/gomega v1.27.7 9 | k8s.io/api v0.27.2 10 | k8s.io/apimachinery v0.27.2 11 | k8s.io/client-go v0.27.2 12 | sigs.k8s.io/controller-runtime v0.15.0 13 | ) 14 | 15 | require ( 16 | github.com/alibabacloud-go/alibabacloud-gateway-pop v0.0.6 // indirect 17 | github.com/alibabacloud-go/alibabacloud-gateway-spi v0.0.5 // indirect 18 | github.com/alibabacloud-go/darabonba-array v0.1.0 // indirect 19 | github.com/alibabacloud-go/darabonba-encode-util v0.0.2 // indirect 20 | github.com/alibabacloud-go/darabonba-map v0.0.2 // indirect 21 | github.com/alibabacloud-go/darabonba-openapi/v2 v2.0.10 // indirect 22 | github.com/alibabacloud-go/darabonba-signature-util v0.0.7 // indirect 23 | github.com/alibabacloud-go/darabonba-string v1.0.2 // indirect 24 | github.com/alibabacloud-go/debug v1.0.1 // indirect 25 | github.com/alibabacloud-go/endpoint-util v1.1.0 // indirect 26 | github.com/alibabacloud-go/kms-20160120/v3 v3.2.3 // indirect 27 | github.com/alibabacloud-go/openapi-util v0.1.0 // indirect 28 | github.com/alibabacloud-go/tea v1.2.2 // indirect 29 | github.com/alibabacloud-go/tea-utils v1.4.4 // indirect 30 | github.com/alibabacloud-go/tea-utils/v2 v2.0.7 // indirect 31 | github.com/alibabacloud-go/tea-xml v1.1.3 // indirect 32 | github.com/aliyun/alibaba-cloud-sdk-go v1.61.1800 // indirect 33 | github.com/aliyun/alibabacloud-dkms-gcs-go-sdk v0.5.1 // indirect 34 | github.com/aliyun/alibabacloud-dkms-transfer-go-sdk v0.1.8 // indirect 35 | github.com/aliyun/aliyun-secretsmanager-client-go v1.1.5 // indirect 36 | github.com/aliyun/credentials-go v1.4.3 // indirect 37 | github.com/buger/jsonparser v1.1.1 // indirect 38 | github.com/clbanning/mxj/v2 v2.5.5 // indirect 39 | github.com/deckarep/golang-set v1.7.1 // indirect 40 | github.com/golang/mock 
v1.6.0 // indirect 41 | github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af // indirect 42 | github.com/orcaman/concurrent-map v0.0.0-20210501183033-44dafcb38ecc // indirect 43 | github.com/tjfoc/gmsm v1.4.1 // indirect 44 | golang.org/x/crypto v0.31.0 // indirect 45 | golang.org/x/sync v0.10.0 // indirect 46 | google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect 47 | google.golang.org/grpc v1.56.3 // indirect 48 | gopkg.in/ini.v1 v1.67.0 // indirect 49 | gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect 50 | ) 51 | 52 | require ( 53 | github.com/beorn7/perks v1.0.1 // indirect 54 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 55 | github.com/davecgh/go-spew v1.1.1 // indirect 56 | github.com/emicklei/go-restful/v3 v3.9.0 // indirect 57 | github.com/evanphx/json-patch/v5 v5.6.0 // indirect 58 | github.com/fsnotify/fsnotify v1.6.0 // indirect 59 | github.com/go-logr/logr v1.2.4 60 | github.com/go-logr/zapr v1.2.4 // indirect 61 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 62 | github.com/go-openapi/jsonreference v0.20.1 // indirect 63 | github.com/go-openapi/swag v0.22.3 // indirect 64 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect 65 | github.com/gogo/protobuf v1.3.2 // indirect 66 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 67 | github.com/golang/protobuf v1.5.3 // indirect 68 | github.com/google/gnostic v0.5.7-v3refs // indirect 69 | github.com/google/go-cmp v0.6.0 // indirect 70 | github.com/google/gofuzz v1.1.0 // indirect 71 | github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect 72 | github.com/google/uuid v1.3.0 // indirect 73 | github.com/imdario/mergo v0.3.6 // indirect 74 | github.com/josharian/intern v1.0.0 // indirect 75 | github.com/json-iterator/go v1.1.12 // indirect 76 | github.com/mailru/easyjson v0.7.7 // indirect 77 | github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect 78 | 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 79 | github.com/modern-go/reflect2 v1.0.2 // indirect 80 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 81 | github.com/nacos-group/nacos-sdk-go/v2 v2.2.9 82 | github.com/pkg/errors v0.9.1 // indirect 83 | github.com/prometheus/client_golang v1.15.1 // indirect 84 | github.com/prometheus/client_model v0.4.0 // indirect 85 | github.com/prometheus/common v0.42.0 // indirect 86 | github.com/prometheus/procfs v0.9.0 // indirect 87 | github.com/spf13/pflag v1.0.5 // indirect 88 | go.uber.org/atomic v1.7.0 // indirect 89 | go.uber.org/multierr v1.6.0 // indirect 90 | go.uber.org/zap v1.24.0 // indirect 91 | golang.org/x/net v0.33.0 // indirect 92 | golang.org/x/oauth2 v0.7.0 // indirect 93 | golang.org/x/sys v0.28.0 // indirect 94 | golang.org/x/term v0.27.0 // indirect 95 | golang.org/x/text v0.21.0 // indirect 96 | golang.org/x/time v0.3.0 // indirect 97 | golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect 98 | gomodules.xyz/jsonpatch/v2 v2.3.0 // indirect 99 | google.golang.org/appengine v1.6.7 // indirect 100 | google.golang.org/protobuf v1.33.0 // indirect 101 | gopkg.in/inf.v0 v0.9.1 // indirect 102 | gopkg.in/yaml.v2 v2.4.0 // indirect 103 | gopkg.in/yaml.v3 v3.0.1 // indirect 104 | k8s.io/apiextensions-apiserver v0.27.2 // indirect 105 | k8s.io/component-base v0.27.2 // indirect 106 | k8s.io/klog/v2 v2.90.1 // indirect 107 | k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect 108 | k8s.io/utils v0.0.0-20230209194617-a36077c30491 109 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 110 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 111 | sigs.k8s.io/yaml v1.3.0 // indirect 112 | ) 113 | -------------------------------------------------------------------------------- /pkg/controller/dynamicconfiguration_controller.go: -------------------------------------------------------------------------------- 
1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controller 18 | 19 | import ( 20 | "context" 21 | "fmt" 22 | 23 | "github.com/nacos-group/nacos-controller/pkg" 24 | "github.com/nacos-group/nacos-controller/pkg/nacos" 25 | "k8s.io/apimachinery/pkg/api/errors" 26 | "k8s.io/apimachinery/pkg/types" 27 | "k8s.io/client-go/kubernetes" 28 | ctrl "sigs.k8s.io/controller-runtime" 29 | "sigs.k8s.io/controller-runtime/pkg/builder" 30 | "sigs.k8s.io/controller-runtime/pkg/client" 31 | "sigs.k8s.io/controller-runtime/pkg/log" 32 | "sigs.k8s.io/controller-runtime/pkg/predicate" 33 | 34 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 35 | ) 36 | 37 | // DynamicConfigurationReconciler reconciles a DynamicConfiguration object 38 | type DynamicConfigurationReconciler struct { 39 | client.Client 40 | controller *nacos.DynamicConfigurationUpdateController 41 | } 42 | 43 | func NewDynamicConfigurationReconciler(c client.Client, cs *kubernetes.Clientset, opt nacos.SyncConfigOptions) *DynamicConfigurationReconciler { 44 | return &DynamicConfigurationReconciler{ 45 | Client: c, 46 | controller: nacos.NewDynamicConfigurationUpdateController(c, cs, opt), 47 | } 48 | } 49 | 50 | //+kubebuilder:rbac:groups=nacos.io,resources=dynamicconfigurations,verbs=get;list;watch;create;update;patch;delete 51 | //+kubebuilder:rbac:groups=nacos.io,resources=dynamicconfigurations/status,verbs=get;update;patch 52 | 
//+kubebuilder:rbac:groups=nacos.io,resources=dynamicconfigurations/finalizers,verbs=update 53 | 54 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 55 | // move the current state of the cluster closer to the desired state. 56 | // TODO(user): Modify the Reconcile function to compare the state specified by 57 | // the DynamicConfiguration object against the actual cluster state, and then 58 | // perform operations to make the cluster state reflect the state specified by 59 | // the user. 60 | // 61 | // For more details, check Reconcile and its Result here: 62 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/reconcile 63 | func (r *DynamicConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 64 | l := log.FromContext(ctx) 65 | defer func() { 66 | if r := recover(); r != nil { 67 | err := fmt.Errorf("panic occurred") 68 | l.Error(err, "panic", "req", req, "recover", r) 69 | } 70 | }() 71 | dc := nacosiov1.DynamicConfiguration{} 72 | if err := r.Get(ctx, types.NamespacedName{ 73 | Namespace: req.Namespace, 74 | Name: req.Name, 75 | }, &dc); err != nil { 76 | if errors.IsNotFound(err) { 77 | return ctrl.Result{}, nil 78 | } 79 | l.Error(err, "get DynamicConfiguration error") 80 | return ctrl.Result{}, err 81 | } 82 | if dc.DeletionTimestamp != nil { 83 | return ctrl.Result{}, r.doFinalization(ctx, &dc) 84 | } 85 | if err := r.ensureFinalizer(ctx, &dc); err != nil { 86 | return ctrl.Result{}, err 87 | } 88 | err := r.controller.SyncDynamicConfiguration(ctx, &dc) 89 | if err != nil { 90 | l.Error(err, "sync error") 91 | nacos.FailedStatus(&dc, err.Error()) 92 | err_ := r.Status().Update(ctx, &dc) 93 | if err_ != nil && errors.IsConflict(err_) { 94 | return ctrl.Result{Requeue: true}, err 95 | } else { 96 | return ctrl.Result{}, err 97 | } 98 | } 99 | nacos.UpdateStatus(&dc) 100 | err = r.Status().Update(ctx, &dc) 101 | if err != nil { 102 | if errors.IsConflict(err) { 103 | 
return ctrl.Result{Requeue: true}, nil 104 | } 105 | return ctrl.Result{}, err 106 | } 107 | return ctrl.Result{}, nil 108 | } 109 | 110 | func (r *DynamicConfigurationReconciler) ensureFinalizer(ctx context.Context, obj client.Object) error { 111 | if pkg.Contains(obj.GetFinalizers(), pkg.FinalizerName) { 112 | return nil 113 | } 114 | l := log.FromContext(ctx) 115 | l.Info("Add finalizer") 116 | obj.SetFinalizers(append(obj.GetFinalizers(), pkg.FinalizerName)) 117 | if err := r.Update(ctx, obj); err != nil { 118 | l.Error(err, "update finalizer error") 119 | return err 120 | } 121 | return nil 122 | } 123 | 124 | func (r *DynamicConfigurationReconciler) doFinalization(ctx context.Context, dc *nacosiov1.DynamicConfiguration) error { 125 | if !pkg.Contains(dc.GetFinalizers(), pkg.FinalizerName) { 126 | return nil 127 | } 128 | l := log.FromContext(ctx) 129 | // 执行清理逻辑 130 | if err := r.controller.Finalize(ctx, dc); err != nil { 131 | nacos.FailedStatus(dc, err.Error()) 132 | if e := r.Status().Update(ctx, dc); e != nil { 133 | l.Error(e, "update status error") 134 | } 135 | return err 136 | } 137 | l.Info("Remove finalizer") 138 | dc.SetFinalizers(pkg.Remove(dc.GetFinalizers(), pkg.FinalizerName)) 139 | if err := r.Update(ctx, dc); err != nil { 140 | l.Error(err, "remove finalizer error") 141 | return err 142 | } 143 | return nil 144 | } 145 | 146 | // SetupWithManager sets up the controller with the Manager. 147 | func (r *DynamicConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { 148 | return ctrl.NewControllerManagedBy(mgr). 149 | For(&nacosiov1.DynamicConfiguration{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). 
150 | Complete(r) 151 | } 152 | -------------------------------------------------------------------------------- /pkg/nacos/util.go: -------------------------------------------------------------------------------- 1 | package nacos 2 | 3 | import ( 4 | "crypto/md5" 5 | "encoding/base64" 6 | "encoding/hex" 7 | "fmt" 8 | "strings" 9 | 10 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 11 | v1 "k8s.io/api/core/v1" 12 | "k8s.io/apimachinery/pkg/runtime/schema" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | var ( 17 | ConfigMapGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} 18 | SecretGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} 19 | ) 20 | 21 | func GetNacosConfigurationUniKey(namespace, group, dataId string) string { 22 | return fmt.Sprintf("%s/%s/%s", namespace, group, dataId) 23 | } 24 | 25 | func CalcMd5(s string) string { 26 | if len(s) == 0 { 27 | return "" 28 | } 29 | sum := md5.Sum([]byte(s)) 30 | return hex.EncodeToString(sum[:]) 31 | } 32 | 33 | func StringSliceContains(arr []string, item string) bool { 34 | for _, v := range arr { 35 | if v == item { 36 | return true 37 | } 38 | } 39 | return false 40 | } 41 | 42 | func DynamicConfigurationMatch(object client.Object, dc *nacosiov1.DynamicConfiguration) bool { 43 | if object.GetNamespace() != dc.Namespace { 44 | return false 45 | } 46 | if dc.Spec.Strategy.SyncScope == nacosiov1.SyncScopeFull { 47 | return true 48 | } 49 | _, kind := object.GetObjectKind().GroupVersionKind().ToAPIVersionAndKind() 50 | for _, ref := range dc.Spec.ObjectRefs { 51 | if ref.Kind == kind && ref.Name == object.GetName() { 52 | return true 53 | } 54 | } 55 | return true 56 | } 57 | 58 | func GetAllKeys(object client.Object) []string { 59 | switch object.GetObjectKind().GroupVersionKind().Kind { 60 | case "ConfigMap": 61 | return GetConfigMapAllKeys(object.(*v1.ConfigMap)) 62 | case "Secret": 63 | return GetSecretAllKeys(object.(*v1.Secret)) 64 | 
default: 65 | return []string{} 66 | } 67 | } 68 | 69 | func GetConfigMapAllKeys(configMap *v1.ConfigMap) []string { 70 | data := configMap.Data 71 | binaryData := configMap.BinaryData 72 | var keys []string 73 | for key := range data { 74 | keys = append(keys, key) 75 | } 76 | for key := range binaryData { 77 | keys = append(keys, key) 78 | } 79 | return keys 80 | } 81 | 82 | func GetSecretAllKeys(secret *v1.Secret) []string { 83 | var keys []string 84 | for key := range secret.Data { 85 | keys = append(keys, key) 86 | } 87 | return keys 88 | } 89 | 90 | func GetContent(object client.Object, dataId string) (string, bool) { 91 | switch object.GetObjectKind().GroupVersionKind().Kind { 92 | case "ConfigMap": 93 | return GetConfigMapContent(object.(*v1.ConfigMap), dataId) 94 | case "Secret": 95 | return GetSecretContent(object.(*v1.Secret), dataId) 96 | default: 97 | return "", false 98 | } 99 | } 100 | 101 | func GetConfigMapContent(configMap *v1.ConfigMap, dataId string) (string, bool) { 102 | data := configMap.Data 103 | binaryData := configMap.BinaryData 104 | if v, ok := data[dataId]; ok { 105 | return v, true 106 | } 107 | if v, ok := binaryData[dataId]; ok { 108 | return string(v), true 109 | } 110 | return "", false 111 | } 112 | 113 | func GetSecretContent(secret *v1.Secret, dataId string) (string, bool) { 114 | data := secret.Data 115 | if v, ok := data[dataId]; ok { 116 | return base64.StdEncoding.EncodeToString(v), true 117 | //return string(v), true 118 | } 119 | return "", false 120 | } 121 | 122 | func StoreContent(object client.Object, dataId string, content string) error { 123 | switch object.GetObjectKind().GroupVersionKind().Kind { 124 | case "ConfigMap": 125 | return StoreConfigMapContent(object.(*v1.ConfigMap), dataId, content) 126 | case "Secret": 127 | return StoreSecretContent(object.(*v1.Secret), dataId, content) 128 | default: 129 | return nil 130 | } 131 | } 132 | 133 | func StoreConfigMapContent(configMap *v1.ConfigMap, dataId string, 
content string) error { 134 | if strings.HasPrefix(content, "{") || strings.HasPrefix(content, "[") { 135 | if configMap.BinaryData == nil { 136 | configMap.BinaryData = map[string][]byte{} 137 | } 138 | configMap.BinaryData[dataId] = []byte(content) 139 | } else { 140 | if configMap.Data == nil { 141 | configMap.Data = map[string]string{} 142 | } 143 | configMap.Data[dataId] = content 144 | } 145 | return nil 146 | } 147 | 148 | func StoreSecretContent(secret *v1.Secret, dataId string, content string) error { 149 | if secret.Data == nil { 150 | secret.Data = map[string][]byte{} 151 | } 152 | base64content, err := base64.StdEncoding.DecodeString(content) 153 | if err != nil { 154 | return err 155 | } 156 | secret.Data[dataId] = []byte(base64content) 157 | return nil 158 | } 159 | 160 | func CompareDataIds(listenDataIds []string, localDataIds []string) ([]string, []string, []string) { 161 | listenOnly := make(map[string]bool) 162 | localOnly := make(map[string]bool) 163 | allIds := make(map[string]bool) 164 | 165 | // Mark all IDs from listenDataIds 166 | for _, id := range listenDataIds { 167 | listenOnly[id] = true 168 | 169 | } 170 | 171 | // Mark all IDs from localDataIds and find common IDs 172 | for _, id := range localDataIds { 173 | if listenOnly[id] { 174 | delete(listenOnly, id) 175 | allIds[id] = true 176 | } else { 177 | localOnly[id] = true 178 | } 179 | } 180 | 181 | // Collect IDs that are only in listenDataIds 182 | listenOnlySlice := make([]string, 0, len(listenOnly)) 183 | for id := range listenOnly { 184 | listenOnlySlice = append(listenOnlySlice, id) 185 | } 186 | 187 | // Collect IDs that are only in localDataIds 188 | localOnlySlice := make([]string, 0, len(localOnly)) 189 | for id := range localOnly { 190 | localOnlySlice = append(localOnlySlice, id) 191 | } 192 | 193 | bothSlice := make([]string, 0, len(allIds)) 194 | for id := range allIds { 195 | bothSlice = append(bothSlice, id) 196 | } 197 | 198 | return listenOnlySlice, localOnlySlice, 
bothSlice 199 | } 200 | -------------------------------------------------------------------------------- /pkg/controller/configmap_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controller 18 | 19 | import ( 20 | "context" 21 | "fmt" 22 | 23 | "github.com/nacos-group/nacos-controller/pkg" 24 | "github.com/nacos-group/nacos-controller/pkg/nacos" 25 | v1 "k8s.io/api/core/v1" 26 | "k8s.io/apimachinery/pkg/api/errors" 27 | "k8s.io/apimachinery/pkg/runtime" 28 | "k8s.io/apimachinery/pkg/types" 29 | "k8s.io/client-go/kubernetes" 30 | ctrl "sigs.k8s.io/controller-runtime" 31 | "sigs.k8s.io/controller-runtime/pkg/client" 32 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 33 | "sigs.k8s.io/controller-runtime/pkg/log" 34 | ) 35 | 36 | // ConfigMapReconciler reconciles a ConfigMap object 37 | type ConfigMapReconciler struct { 38 | client.Client 39 | controller *nacos.ConfigurationSyncController 40 | Scheme *runtime.Scheme 41 | } 42 | 43 | func NewConfigMapReconciler(c client.Client, cs *kubernetes.Clientset, opt nacos.SyncConfigOptions, scheme *runtime.Scheme) *ConfigMapReconciler { 44 | return &ConfigMapReconciler{ 45 | Client: c, 46 | controller: nacos.NewConfigurationSyncController(c, cs, opt), 47 | Scheme: scheme, 48 | } 49 | } 50 | 51 | // 
+kubebuilder:rbac:groups=nacos.io,resources=configmaps,verbs=get;list;watch;create;update;patch;delete 52 | // +kubebuilder:rbac:groups=nacos.io,resources=configmaps/status,verbs=get;update;patch 53 | // +kubebuilder:rbac:groups=nacos.io,resources=configmaps/finalizers,verbs=update 54 | 55 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 56 | // move the current state of the cluster closer to the desired state. 57 | // TODO(user): Modify the Reconcile function to compare the state specified by 58 | // the ConfigMap object against the actual cluster state, and then 59 | // perform operations to make the cluster state reflect the state specified by 60 | // the user. 61 | // 62 | // For more details, check Reconcile and its Result here: 63 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.19.1/pkg/reconcile 64 | func (r *ConfigMapReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 65 | l := log.FromContext(ctx) 66 | defer func() { 67 | if r := recover(); r != nil { 68 | err := fmt.Errorf("panic occurred") 69 | l.Error(err, "panic", "req", req, "recover", r) 70 | } 71 | }() 72 | if pkg.CurrentContext == "null" { 73 | clusterConfig := v1.ConfigMap{} 74 | if err := r.Get(nil, types.NamespacedName{Namespace: "kube-system", 75 | Name: "ack-cluster-profile"}, &clusterConfig); err != nil { 76 | if errors.IsNotFound(err) { 77 | l.Error(err, "unable to get cluster profile") 78 | } 79 | l.Error(err, "unable to get cluster profile") 80 | } 81 | pkg.CurrentContext = clusterConfig.Data["clusterid"] 82 | } 83 | configMap := v1.ConfigMap{} 84 | if err := r.Get(ctx, req.NamespacedName, &configMap); err != nil { 85 | if errors.IsNotFound(err) { 86 | return ctrl.Result{}, nil 87 | } 88 | l.Error(err, "get ConfigMap error") 89 | return ctrl.Result{}, err 90 | } 91 | if configMap.DeletionTimestamp != nil { 92 | return ctrl.Result{}, r.doFinalization(ctx, &configMap) 93 | } 94 | if err := r.ensureFinalizer(ctx, 
&configMap); err != nil { 95 | if errors.IsConflict(err) { 96 | return ctrl.Result{Requeue: true}, nil 97 | } 98 | return ctrl.Result{}, err 99 | } 100 | if err := r.controller.DoReconcile(ctx, &configMap); err != nil { 101 | l.Error(err, "doReconcile error") 102 | return ctrl.Result{}, err 103 | } 104 | err := r.Update(ctx, &configMap) 105 | if err != nil { 106 | if errors.IsConflict(err) { 107 | return ctrl.Result{Requeue: true}, nil 108 | } 109 | return ctrl.Result{}, err 110 | } 111 | return ctrl.Result{}, nil 112 | } 113 | 114 | // SetupWithManager sets up the controller with the Manager. 115 | func (r *ConfigMapReconciler) SetupWithManager(mgr ctrl.Manager) error { 116 | return ctrl.NewControllerManagedBy(mgr). 117 | // Uncomment the following line adding a pointer to an instance of the controlled resource as an argument 118 | For(&v1.ConfigMap{}). 119 | Named("configmap"). 120 | Complete(r) 121 | } 122 | 123 | func (r *ConfigMapReconciler) doFinalization(ctx context.Context, configMap *v1.ConfigMap) error { 124 | if !pkg.Contains(configMap.GetFinalizers(), pkg.FinalizerName) { 125 | return nil 126 | } 127 | l := log.FromContext(ctx, "stage", "configMap finalize") 128 | l.Info("doFinalization", "configMap", configMap) 129 | if err := r.controller.Finalize(ctx, configMap); err != nil { 130 | l.Error(err, "configMap finalize error") 131 | return err 132 | } 133 | l.Info("Remove finalizer", "configMap", configMap) 134 | configMap.SetFinalizers(pkg.Remove(configMap.GetFinalizers(), pkg.FinalizerName)) 135 | if err := r.Update(ctx, configMap); err != nil { 136 | l.Error(err, "remove finalizer error") 137 | return err 138 | } 139 | return nil 140 | } 141 | 142 | func (r *ConfigMapReconciler) ensureFinalizer(ctx context.Context, object client.Object) error { 143 | if controllerutil.ContainsFinalizer(object, pkg.FinalizerName) { 144 | return nil 145 | } 146 | l := log.FromContext(ctx) 147 | controllerutil.AddFinalizer(object, pkg.FinalizerName) 148 | if err := 
r.Update(ctx, object); err != nil { 149 | l.Error(err, "add ConfigMap finalizer error") 150 | return err 151 | } 152 | return nil 153 | } 154 | -------------------------------------------------------------------------------- /pkg/nacos/dynamicconfiguration_util.go: -------------------------------------------------------------------------------- 1 | package nacos 2 | 3 | import ( 4 | "fmt" 5 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 6 | "github.com/nacos-group/nacos-controller/pkg" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "strings" 9 | ) 10 | 11 | func UpdateSyncStatus(dc *nacosiov1.DynamicConfiguration, group, dataId, md5, from string, t metav1.Time, ready bool, message string) { 12 | if dc == nil { 13 | return 14 | } 15 | if len(dc.Status.SyncStatuses) == 0 { 16 | dc.Status.SyncStatuses = make(map[string][]nacosiov1.SyncStatus) 17 | } 18 | syncStatuses := dc.Status.SyncStatuses 19 | groupSyncStatuses := syncStatuses[group] 20 | groupSyncStatuses = replaceSyncStatus(groupSyncStatuses, nacosiov1.SyncStatus{ 21 | DataId: dataId, 22 | LastSyncFrom: from, 23 | LastSyncTime: t, 24 | Ready: ready, 25 | Message: message, 26 | Md5: md5, 27 | }) 28 | syncStatuses[group] = groupSyncStatuses 29 | dc.Status.SyncStatuses = syncStatuses 30 | } 31 | 32 | func UpdateSyncStatusIfAbsent(dc *nacosiov1.DynamicConfiguration, group, dataId, md5, from string, t metav1.Time, ready bool, message string) { 33 | if dc == nil { 34 | return 35 | } 36 | if len(dc.Status.SyncStatuses) == 0 { 37 | dc.Status.SyncStatuses = make(map[string][]nacosiov1.SyncStatus) 38 | } 39 | syncStatuses := dc.Status.SyncStatuses 40 | if GetSyncStatusByDataId(dc, group, dataId) != nil { 41 | return 42 | } 43 | groupSyncStatuses := syncStatuses[group] 44 | if len(groupSyncStatuses) == 0 { 45 | groupSyncStatuses = []nacosiov1.SyncStatus{} 46 | } 47 | groupSyncStatuses = append(groupSyncStatuses, nacosiov1.SyncStatus{ 48 | DataId: dataId, 49 | LastSyncFrom: from, 50 | LastSyncTime: t, 
51 | Ready: ready, 52 | Message: message, 53 | Md5: md5, 54 | }) 55 | syncStatuses[group] = groupSyncStatuses 56 | dc.Status.SyncStatuses = syncStatuses 57 | } 58 | 59 | func RemoveSyncStatus(dc *nacosiov1.DynamicConfiguration, group, dataId string) { 60 | if dc == nil || len(dc.Status.SyncStatuses) == 0 { 61 | return 62 | } 63 | if len(dc.Status.SyncStatuses) == 0 { 64 | dc.Status.SyncStatuses = make(map[string][]nacosiov1.SyncStatus) 65 | } 66 | groupSyncStatues := dc.Status.SyncStatuses[group] 67 | if len(groupSyncStatues) == 0 { 68 | return 69 | } 70 | var newStatus []nacosiov1.SyncStatus 71 | for _, status := range dc.Status.SyncStatuses[group] { 72 | if dataId == status.DataId { 73 | continue 74 | } 75 | newStatus = append(newStatus, status) 76 | } 77 | dc.Status.SyncStatuses[group] = newStatus 78 | } 79 | 80 | func replaceSyncStatus(statuses []nacosiov1.SyncStatus, s nacosiov1.SyncStatus) []nacosiov1.SyncStatus { 81 | if len(statuses) == 0 { 82 | return []nacosiov1.SyncStatus{s} 83 | } 84 | idx := -1 85 | for i, v := range statuses { 86 | if v.DataId == s.DataId { 87 | idx = i 88 | break 89 | } 90 | } 91 | if idx >= 0 { 92 | statuses[idx] = s 93 | } else { 94 | statuses = append(statuses, s) 95 | } 96 | return statuses 97 | } 98 | 99 | func GetSyncStatusByDataId(dc *nacosiov1.DynamicConfiguration, group, dataId string) *nacosiov1.SyncStatus { 100 | if len(dc.Status.SyncStatuses) == 0 { 101 | dc.Status.SyncStatuses = make(map[string][]nacosiov1.SyncStatus) 102 | } 103 | statuses := dc.Status.SyncStatuses 104 | if len(statuses) == 0 { 105 | return nil 106 | } 107 | groupSyncStatues := statuses[group] 108 | if len(groupSyncStatues) == 0 { 109 | return nil 110 | } 111 | for i := range groupSyncStatues { 112 | v := groupSyncStatues[i] 113 | if v.DataId == dataId { 114 | return &v 115 | } 116 | } 117 | return nil 118 | } 119 | 120 | func AddListenConfig(dc *nacosiov1.DynamicConfiguration, group, dataId string) { 121 | if dc == nil { 122 | return 123 | } 124 | if 
len(dc.Status.ListenConfigs) == 0 { 125 | dc.Status.ListenConfigs = make(map[string][]string) 126 | } 127 | groupListConfigs := dc.Status.ListenConfigs[group] 128 | if len(groupListConfigs) == 0 { 129 | groupListConfigs = []string{dataId} 130 | dc.Status.ListenConfigs[group] = groupListConfigs 131 | return 132 | } else { 133 | for _, v := range groupListConfigs { 134 | if v == dataId { 135 | return 136 | } 137 | } 138 | groupListConfigs = append(groupListConfigs, dataId) 139 | dc.Status.ListenConfigs[group] = groupListConfigs 140 | return 141 | } 142 | } 143 | 144 | func UpdateDynamicConfigurationStatus(dc *nacosiov1.DynamicConfiguration) { 145 | if dc == nil { 146 | return 147 | } 148 | dc.Status.SyncStrategyStatus = dc.Spec.Strategy 149 | dc.Status.NacosServerStatus.Namespace = dc.Spec.NacosServer.Namespace 150 | dc.Status.NacosServerStatus.ServerAddr = dc.Spec.NacosServer.ServerAddr 151 | dc.Status.NacosServerStatus.Endpoint = dc.Spec.NacosServer.Endpoint 152 | return 153 | } 154 | 155 | func CleanDynamicConfigurationStatus(dc *nacosiov1.DynamicConfiguration) { 156 | if dc == nil { 157 | return 158 | } 159 | dc.Status.ListenConfigs = map[string][]string{} 160 | dc.Status.SyncStatuses = map[string][]nacosiov1.SyncStatus{} 161 | dc.Status.SyncStrategyStatus = nacosiov1.SyncStrategy{} 162 | dc.Status.NacosServerStatus = nacosiov1.NacosServerStatus{} 163 | } 164 | 165 | func checkNacosServerChange(dc *nacosiov1.DynamicConfiguration) bool { 166 | if dc.Spec.NacosServer.Namespace != dc.Status.NacosServerStatus.Namespace { 167 | return true 168 | } 169 | if dc.Spec.NacosServer.ServerAddr != dc.Status.NacosServerStatus.ServerAddr { 170 | return true 171 | } 172 | if dc.Spec.NacosServer.Endpoint != dc.Status.NacosServerStatus.Endpoint { 173 | return true 174 | } 175 | return false 176 | } 177 | func FailedStatus(dc *nacosiov1.DynamicConfiguration, message string) { 178 | if dc == nil { 179 | return 180 | } 181 | dc.Status.Phase = pkg.PhaseFailed 182 | dc.Status.Message = 
message 183 | dc.Status.ObservedGeneration = dc.Generation 184 | } 185 | 186 | func UpdateStatus(dc *nacosiov1.DynamicConfiguration) { 187 | if dc == nil { 188 | return 189 | } 190 | dc.Status.ObservedGeneration = dc.Generation 191 | dc.Status.Phase = pkg.PhaseSucceed 192 | dc.Status.Message = "" 193 | 194 | var notReadyDataIds []string 195 | for group, groupSyncStatuses := range dc.Status.SyncStatuses { 196 | for _, dataIdSyncStatus := range groupSyncStatuses { 197 | if dataIdSyncStatus.Ready { 198 | continue 199 | } 200 | if !dataIdSyncStatus.Ready { 201 | notReadyDataIds = append(notReadyDataIds, group+"#"+dataIdSyncStatus.DataId) 202 | } 203 | } 204 | } 205 | if len(notReadyDataIds) > 0 { 206 | dc.Status.Phase = pkg.PhaseFailed 207 | dc.Status.Message = fmt.Sprintf("not ready dataIds: %s", strings.Join(notReadyDataIds, ",")) 208 | } 209 | } 210 | -------------------------------------------------------------------------------- /cmd/main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "encoding/json" 21 | "flag" 22 | "os" 23 | 24 | "github.com/nacos-group/nacos-controller/pkg/nacos" 25 | "github.com/nacos-group/nacos-controller/pkg/nacos/auth" 26 | "github.com/nacos-group/nacos-controller/pkg/nacos/client/impl" 27 | "k8s.io/client-go/kubernetes" 28 | 29 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 30 | // to ensure that exec-entrypoint and run can make use of them. 31 | _ "k8s.io/client-go/plugin/pkg/client/auth" 32 | 33 | "k8s.io/apimachinery/pkg/runtime" 34 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 35 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 36 | ctrl "sigs.k8s.io/controller-runtime" 37 | "sigs.k8s.io/controller-runtime/pkg/healthz" 38 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 39 | 40 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 41 | "github.com/nacos-group/nacos-controller/pkg/controller" 42 | //+kubebuilder:scaffold:imports 43 | ) 44 | 45 | var ( 46 | scheme = runtime.NewScheme() 47 | setupLog = ctrl.Log.WithName("setup") 48 | ) 49 | 50 | func init() { 51 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 52 | 53 | utilruntime.Must(nacosiov1.AddToScheme(scheme)) 54 | //+kubebuilder:scaffold:scheme 55 | } 56 | 57 | func main() { 58 | var metricsAddr string 59 | var enableLeaderElection bool 60 | var probeAddr string 61 | var enableWebhook bool 62 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 63 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 64 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 65 | "Enable leader election for controller manager. 
"+ 66 | "Enabling this will ensure there is only one active controller manager.") 67 | flag.BoolVar(&enableWebhook, "enable-webhook", false, "Enable webhook for validation and defaulting") 68 | opts := zap.Options{ 69 | Development: true, 70 | } 71 | opts.BindFlags(flag.CommandLine) 72 | flag.Parse() 73 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 74 | 75 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 76 | Scheme: scheme, 77 | MetricsBindAddress: metricsAddr, 78 | Port: 9443, 79 | HealthProbeBindAddress: probeAddr, 80 | LeaderElection: enableLeaderElection, 81 | LeaderElectionID: "nacos.io", 82 | // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily 83 | // when the Manager ends. This requires the binary to immediately end when the 84 | // Manager is stopped, otherwise, this setting is unsafe. Setting this significantly 85 | // speeds up voluntary leader transitions as the new leader don't have to wait 86 | // LeaseDuration time first. 87 | // 88 | // In the default scaffold provided, the program ends immediately after 89 | // the manager stops, so would be fine to enable this option. However, 90 | // if you are doing or is intended to do any operation such as perform cleanups 91 | // after the manager stops then its usage might be unsafe. 
92 | LeaderElectionReleaseOnCancel: true, 93 | }) 94 | if err != nil { 95 | setupLog.Error(err, "unable to start manager") 96 | os.Exit(1) 97 | } 98 | 99 | clientSet := kubernetes.NewForConfigOrDie(ctrl.GetConfigOrDie()) 100 | 101 | nacosConfigClient := impl.NewDefaultNacosConfigClient(auth.NewDefaultNacosAuthProvider(mgr.GetClient())) 102 | locks := nacos.NewLockManager() 103 | 104 | if err = controller.NewDynamicConfigurationReconciler(mgr.GetClient(), clientSet, nacos.SyncConfigOptions{ 105 | ConfigClient: nacosConfigClient, 106 | Locks: locks, 107 | }).SetupWithManager(mgr); err != nil { 108 | setupLog.Error(err, "unable to create controller", "controller", "DynamicConfiguration") 109 | os.Exit(1) 110 | } 111 | if enableWebhook { 112 | setupLog.Info("webhook enabled") 113 | if err = (&nacosiov1.DynamicConfiguration{}).SetupWebhookWithManager(mgr); err != nil { 114 | setupLog.Error(err, "unable to create webhook", "webhook", "DynamicConfiguration") 115 | os.Exit(1) 116 | } 117 | } 118 | 119 | if err = controller.NewConfigMapReconciler(mgr.GetClient(), clientSet, nacos.SyncConfigOptions{ 120 | ConfigClient: nacosConfigClient, 121 | Locks: locks, 122 | }, mgr.GetScheme()).SetupWithManager(mgr); err != nil { 123 | setupLog.Error(err, "unable to create controller", "controller", "ConfigMap") 124 | os.Exit(1) 125 | } 126 | if err = controller.NewSecretReconciler(mgr.GetClient(), clientSet, nacos.SyncConfigOptions{ 127 | ConfigClient: nacosConfigClient, 128 | Locks: locks, 129 | }, mgr.GetScheme()). 
130 | SetupWithManager(mgr); err != nil { 131 | setupLog.Error(err, "unable to create controller", "controller", "Secret") 132 | os.Exit(1) 133 | } 134 | 135 | if err = (controller.NewServiceDiscoveryReconciler(mgr.GetClient(), clientSet)).SetupWithManager(mgr); err != nil { 136 | setupLog.Error(err, "unable to create controller", "controller", "ServiceDiscovery") 137 | os.Exit(1) 138 | } 139 | 140 | c := mgr.GetClient() 141 | bytes, _ := json.Marshal(c) 142 | setupLog.Info("client", "client", string(bytes)) 143 | if err = (&controller.EndpointReconciler{ 144 | Client: c, 145 | Scheme: mgr.GetScheme(), 146 | }).SetupWithManager(mgr); err != nil { 147 | setupLog.Error(err, "unable to create controller", "controller", "Endpoint") 148 | os.Exit(1) 149 | } 150 | if err = (&controller.ServiceReconciler{ 151 | Client: mgr.GetClient(), 152 | Scheme: mgr.GetScheme(), 153 | }).SetupWithManager(mgr); err != nil { 154 | setupLog.Error(err, "unable to create controller", "controller", "Service") 155 | os.Exit(1) 156 | } 157 | //+kubebuilder:scaffold:builder 158 | 159 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 160 | setupLog.Error(err, "unable to set up health check") 161 | os.Exit(1) 162 | } 163 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 164 | setupLog.Error(err, "unable to set up ready check") 165 | os.Exit(1) 166 | } 167 | 168 | setupLog.Info("starting manager") 169 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 170 | setupLog.Error(err, "problem running manager") 171 | os.Exit(1) 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /pkg/controller/endpoint_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controller 18 | 19 | import ( 20 | "context" 21 | "encoding/json" 22 | "errors" 23 | "fmt" 24 | 25 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 26 | nacosv1 "github.com/nacos-group/nacos-controller/api/v1" 27 | "github.com/nacos-group/nacos-controller/pkg" 28 | "github.com/nacos-group/nacos-controller/pkg/nacos" 29 | "github.com/nacos-group/nacos-controller/pkg/nacos/auth" 30 | "github.com/nacos-group/nacos-controller/pkg/nacos/naming" 31 | corev1 "k8s.io/api/core/v1" 32 | apierrors "k8s.io/apimachinery/pkg/api/errors" 33 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 34 | "k8s.io/apimachinery/pkg/runtime" 35 | ctrl "sigs.k8s.io/controller-runtime" 36 | "sigs.k8s.io/controller-runtime/pkg/client" 37 | "sigs.k8s.io/controller-runtime/pkg/log" 38 | ) 39 | 40 | // EndpointReconciler reconciles a Endpoint object 41 | type EndpointReconciler struct { 42 | Client client.Client 43 | Scheme *runtime.Scheme 44 | } 45 | 46 | //+kubebuilder:rbac:groups=core,resources=endpoints,verbs=get;list;watch;create;update;patch;delete 47 | //+kubebuilder:rbac:groups=core,resources=endpoints/status,verbs=get;update;patch 48 | //+kubebuilder:rbac:groups=core,resources=endpoints/finalizers,verbs=update 49 | 50 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 51 | // move the current state of the cluster closer to the desired state. 
52 | // TODO(user): Modify the Reconcile function to compare the state specified by 53 | // the Endpoint object against the actual cluster state, and then 54 | // perform operations to make the cluster state reflect the state specified by 55 | // the user. 56 | // 57 | // For more details, check Reconcile and its Result here: 58 | // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.4/pkg/reconcile 59 | func (r *EndpointReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 60 | _ = log.FromContext(ctx) 61 | es := &corev1.Endpoints{} 62 | svcDeleted := false 63 | if err := r.Client.Get(ctx, req.NamespacedName, es); err != nil { 64 | if apierrors.IsNotFound(err) { //资源被删除,清除Nacos provider 65 | fmt.Println("Service deleted:", req.NamespacedName) 66 | svcDeleted = true 67 | } else { 68 | return ctrl.Result{}, client.IgnoreNotFound(err) 69 | } 70 | } 71 | 72 | if svcDeleted { 73 | es = &corev1.Endpoints{} 74 | es.Name = req.Name 75 | es.Namespace = req.Namespace 76 | es.Subsets = make([]corev1.EndpointSubset, 0) 77 | } 78 | 79 | log.Log.Info("Reconcile Endpoint", "endpoint", es.Name, "namespace", es.Namespace, "subset", es.Subsets) 80 | 81 | sdl := &nacosv1.ServiceDiscoveryList{} 82 | if err := r.Client.List(ctx, sdl, client.InNamespace(es.Namespace)); err != nil { 83 | return ctrl.Result{}, err 84 | } 85 | svcSds := make([]*nacosv1.ServiceDiscovery, 0) 86 | for _, sd := range sdl.Items { 87 | if len(sd.Spec.Services) == 0 || nacos.StringSliceContains(sd.Spec.Services, es.Name) { 88 | svcSds = append(svcSds, sd) 89 | } 90 | } 91 | 92 | if len(svcSds) == 0 { 93 | return ctrl.Result{}, nil 94 | } 95 | 96 | svc := &corev1.Service{} 97 | if err := r.Client.Get(ctx, client.ObjectKey{Namespace: es.Namespace, Name: es.Name}, svc); err != nil { 98 | if !apierrors.IsNotFound(err) { 99 | return ctrl.Result{}, client.IgnoreNotFound(err) 100 | } else { 101 | svc = &corev1.Service{} 102 | svc.Name = es.Name 103 | svc.Namespace = es.Namespace 104 
| svc.Annotations = make(map[string]string) 105 | } 106 | 107 | } 108 | 109 | serviceInfo, err := naming.GenerateServiceInfo(svc) 110 | 111 | if err != nil { 112 | return ctrl.Result{}, err 113 | } 114 | 115 | log.Log.Info("Sync instance to nacos", "serviceInfo", serviceInfo.ServiceName) 116 | for _, sd := range svcSds { 117 | 118 | if sd.Spec.NacosServer.ServerAddr != "" { 119 | log.Log.Info("Sync instance to nacos", "serverAddr", sd.Spec.NacosServer.ServerAddr) 120 | if err := r.SyncInstanceToNacos(ctx, es, serviceInfo, sd); err != nil { 121 | return ctrl.Result{}, err 122 | } 123 | } 124 | } 125 | 126 | return ctrl.Result{}, nil 127 | } 128 | 129 | func (r *EndpointReconciler) SyncInstanceToNacos(ctx context.Context, es *corev1.Endpoints, serviceInfo naming.ServiceInfo, sd *nacosv1.ServiceDiscovery) error { 130 | addresses := naming.ConvertToAddresses(es, serviceInfo) 131 | log.Log.Info("Sync instance to nacos", "addresses", addresses) 132 | authProvider := auth.NewDefaultNacosAuthProvider(r.Client) 133 | log.Log.Info("Sync instance to nacos", "authProvider", authProvider) 134 | 135 | serviceInfo.Metadata["k8s.name"] = sd.Name 136 | serviceInfo.Metadata["k8s.namespace"] = sd.Namespace 137 | serviceInfo.Metadata["k8s.cluster"] = pkg.CurrentContext 138 | 139 | if nClient, err := naming.GetNacosNamingClientBuilder().BuildNamingClient(authProvider, sd); err == nil && nClient != nil { 140 | log.Log.Info("Sync instance to nacos1", "service", serviceInfo.ServiceName, "addresses", addresses) 141 | if !nClient.RegisterServiceInstances(serviceInfo, addresses) { 142 | marshal, err := json.Marshal(addresses) 143 | addressStr := "" 144 | if err == nil { 145 | addressStr = string(marshal) 146 | } 147 | return errors.New("Register service fail, serviceName: " + serviceInfo.ServiceName + ", addresses: " + addressStr) 148 | } 149 | 150 | status := nacosiov1.ServiceSyncStatus{ 151 | ServiceName: serviceInfo.ServiceName, 152 | GroupName: serviceInfo.Group, 153 | Ready: true, 154 | 
LastSyncTime: metav1.Now(), 155 | } 156 | 157 | if sd.Status.SyncStatuses == nil { 158 | sd.Status.SyncStatuses = make(map[string]*nacosiov1.ServiceSyncStatus) 159 | } 160 | 161 | sd.Status.SyncStatuses[serviceInfo.ServiceKey.String()] = &status 162 | 163 | if err := r.Client.Status().Update(ctx, sd); err != nil { 164 | return err 165 | } 166 | 167 | } else if err != nil { 168 | log.Log.Info("failed to sync instance to nacos ", "error ", err) 169 | return errors.New("Build nacos client fail") 170 | } else { 171 | return errors.New("Build nacos client fail, NacosNamingClient is nil") 172 | } 173 | return nil 174 | } 175 | 176 | // SetupWithManager sets up the controller with the Manager. 177 | func (r *EndpointReconciler) SetupWithManager(mgr ctrl.Manager) error { 178 | return ctrl.NewControllerManagedBy(mgr). 179 | For(&corev1.Endpoints{}). 180 | Complete(r) 181 | } 182 | -------------------------------------------------------------------------------- /pkg/nacos/client/impl/default_client.go: -------------------------------------------------------------------------------- 1 | package impl 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | "strings" 7 | "sync" 8 | 9 | "github.com/nacos-group/nacos-controller/pkg" 10 | "github.com/nacos-group/nacos-controller/pkg/nacos/auth" 11 | "github.com/nacos-group/nacos-controller/pkg/nacos/client" 12 | "github.com/nacos-group/nacos-sdk-go/v2/clients" 13 | "github.com/nacos-group/nacos-sdk-go/v2/clients/config_client" 14 | "github.com/nacos-group/nacos-sdk-go/v2/common/constant" 15 | "github.com/nacos-group/nacos-sdk-go/v2/model" 16 | "github.com/nacos-group/nacos-sdk-go/v2/vo" 17 | v1 "k8s.io/api/core/v1" 18 | "k8s.io/apimachinery/pkg/types" 19 | ) 20 | 21 | type ClientBuilder struct { 22 | cache sync.Map 23 | } 24 | 25 | var builder = ClientBuilder{ 26 | cache: sync.Map{}, 27 | } 28 | 29 | func GetNacosClientBuilder() *ClientBuilder { 30 | return &builder 31 | } 32 | 33 | func (m *ClientBuilder) Remove(nacosServerParam 
client.NacosServerParam, key types.NamespacedName) { 34 | cacheKey := fmt.Sprintf("%s-%s-%s-%s-%s", nacosServerParam.Endpoint, nacosServerParam.ServerAddr, nacosServerParam.Namespace, key.Namespace, key.Name) 35 | cachedClient, ok := m.cache.Load(cacheKey) 36 | if ok && cachedClient != nil { 37 | cachedClient.(config_client.IConfigClient).CloseClient() 38 | } 39 | m.cache.Delete(cacheKey) 40 | return 41 | } 42 | 43 | func (m *ClientBuilder) Get(nacosServerParam client.NacosServerParam, key types.NamespacedName) (config_client.IConfigClient, error) { 44 | cacheKey := fmt.Sprintf("%s-%s-%s-%s-%s", nacosServerParam.Endpoint, nacosServerParam.ServerAddr, nacosServerParam.Namespace, key.Namespace, key.Name) 45 | cachedClient, ok := m.cache.Load(cacheKey) 46 | if ok && cachedClient != nil { 47 | return cachedClient.(config_client.IConfigClient), nil 48 | } 49 | return nil, fmt.Errorf("empty DynamicConfiguration") 50 | } 51 | 52 | func (m *ClientBuilder) Build(authProvider auth.NacosAuthProvider, authRef *v1.ObjectReference, nacosServerParam client.NacosServerParam, key types.NamespacedName) (config_client.IConfigClient, error) { 53 | cacheKey := fmt.Sprintf("%s-%s-%s-%s-%s", nacosServerParam.Endpoint, nacosServerParam.ServerAddr, nacosServerParam.Namespace, key.Namespace, key.Name) 54 | cachedClient, ok := m.cache.Load(cacheKey) 55 | if ok && cachedClient != nil { 56 | return cachedClient.(config_client.IConfigClient), nil 57 | } 58 | clientParams, err := authProvider.GetNacosClientParams(authRef, nacosServerParam, key.Namespace) 59 | if err != nil { 60 | return nil, err 61 | } 62 | var sc []constant.ServerConfig 63 | clientOpts := []constant.ClientOption{ 64 | constant.WithAccessKey(clientParams.AuthInfo.AccessKey), 65 | constant.WithSecretKey(clientParams.AuthInfo.SecretKey), 66 | constant.WithUsername(clientParams.AuthInfo.Username), 67 | constant.WithPassword(clientParams.AuthInfo.Password), 68 | constant.WithTimeoutMs(5000), 69 | 
constant.WithNotLoadCacheAtStart(true), 70 | constant.WithLogDir("/tmp/nacos/log"), 71 | constant.WithCacheDir("/tmp/nacos/cache"), 72 | constant.WithLogLevel("debug"), 73 | constant.WithNamespaceId(clientParams.Namespace), 74 | constant.WithAppConnLabels(map[string]string{"k8s.namespace": key.Namespace, 75 | "k8s.cluster": pkg.CurrentContext, 76 | "k8s.name": key.Name}), 77 | } 78 | if len(clientParams.Endpoint) > 0 { 79 | clientOpts = append(clientOpts, constant.WithEndpoint(clientParams.Endpoint)) 80 | } else if len(clientParams.ServerAddr) > 0 { 81 | port := 8848 82 | ip := clientParams.ServerAddr 83 | if strings.Contains(ip, ":") { 84 | split := strings.Split(ip, ":") 85 | ip = split[0] 86 | if v, err := strconv.Atoi(split[1]); err != nil { 87 | return nil, fmt.Errorf("invalid ServerAddr: %s", clientParams.ServerAddr) 88 | } else { 89 | port = v 90 | } 91 | 92 | } 93 | sc = []constant.ServerConfig{ 94 | *constant.NewServerConfig(ip, uint64(port)), 95 | } 96 | } 97 | cc := *constant.NewClientConfig(clientOpts...) 
	configClient, err := clients.NewConfigClient(
		vo.NacosClientParam{
			ClientConfig:  &cc,
			ServerConfigs: sc,
		})
	if err != nil {
		return nil, err
	}
	// Cache the freshly built client so later calls with the same cacheKey reuse it.
	m.cache.Store(cacheKey, configClient)
	return configClient, nil
}

// DefaultNacosConfigClient implements configuration operations on top of the
// Nacos Go SDK. Every method resolves an SDK client through clientBuilder
// (cached per server + object key) and delegates the call to it.
type DefaultNacosConfigClient struct {
	authProvider  auth.NacosAuthProvider // resolves Nacos credentials (username/password or AK/SK)
	clientBuilder *ClientBuilder         // builds and caches SDK config clients
}

// CancelListenConfig stops watching the given group/dataId.
// Note it uses clientBuilder.Get (lookup only): if no client was ever built
// for this server/key, an error is returned rather than creating one.
func (c *DefaultNacosConfigClient) CancelListenConfig(param client.NacosConfigParam) error {
	proxyClient, err := c.clientBuilder.Get(param.NacosServerParam, param.Key)
	if err != nil {
		return fmt.Errorf("get proxyClient failed, %v", err)
	}
	return proxyClient.CancelListenConfig(vo.ConfigParam{
		Group:  param.Group,
		DataId: param.DataId,
	})
}

// GetConfig reads the content of the given group/dataId from Nacos.
func (c *DefaultNacosConfigClient) GetConfig(param client.NacosConfigParam) (string, error) {
	proxyClient, err := c.clientBuilder.Build(c.authProvider, param.AuthRef, param.NacosServerParam, param.Key)
	if err != nil {
		return "", err
	}
	return proxyClient.GetConfig(vo.ConfigParam{
		Group:  param.Group,
		DataId: param.DataId,
	})
}

// PublishConfig writes param.Content to the given group/dataId. The config is
// tagged with the source cluster/namespace/name so it can be traced back to
// the Kubernetes object it was synced from.
func (c *DefaultNacosConfigClient) PublishConfig(param client.NacosConfigParam) (bool, error) {
	proxyClient, err := c.clientBuilder.Build(c.authProvider, param.AuthRef, param.NacosServerParam, param.Key)
	if err != nil {
		return false, err
	}
	return proxyClient.PublishConfig(vo.ConfigParam{
		Group:      param.Group,
		DataId:     param.DataId,
		Content:    param.Content,
		ConfigTags: "k8s.cluster/" + pkg.CurrentContext + "," + "k8s.namespace/" + param.Key.Namespace + "," + "k8s.name/" + param.Key.Name,
	})
}

// DeleteConfig removes the given group/dataId from Nacos.
func (c *DefaultNacosConfigClient) DeleteConfig(param client.NacosConfigParam) (bool, error) {
	proxyClient, err := c.clientBuilder.Build(c.authProvider, param.AuthRef, param.NacosServerParam, param.Key)
	if err != nil {
		return false, err
	}
	return proxyClient.DeleteConfig(vo.ConfigParam{
		Group:  param.Group,
		DataId: param.DataId,
	})
}

// ListenConfig registers param.OnChange as a change callback for the given
// group/dataId.
func (c *DefaultNacosConfigClient) ListenConfig(param client.NacosConfigParam) error {
	proxyClient, err := c.clientBuilder.Build(c.authProvider, param.AuthRef, param.NacosServerParam, param.Key)
	if err != nil {
		return err
	}
	return proxyClient.ListenConfig(vo.ConfigParam{
		Group:    param.Group,
		DataId:   param.DataId,
		OnChange: param.OnChange,
	})
}

// CloseClient drops the cached SDK client for this server/key.
func (c *DefaultNacosConfigClient) CloseClient(param client.NacosConfigParam) {
	c.clientBuilder.Remove(param.NacosServerParam, param.Key)
}

// SearchConfigs performs a fuzzy ("blur") paged search over configs matching
// param.Group / param.DataId.
func (c *DefaultNacosConfigClient) SearchConfigs(param client.SearchConfigParam) (*model.ConfigPage, error) {
	proxyClient, err := c.clientBuilder.Build(c.authProvider, param.AuthRef, param.NacosServerParam, param.Key)
	if err != nil {
		return nil, fmt.Errorf("get proxyClient failed, %v", err)
	}
	return proxyClient.SearchConfig(vo.SearchConfigParam{
		Search:   "blur",
		Group:    param.Group,
		DataId:   param.DataId,
		PageNo:   param.PageNo,
		PageSize: param.PageSize,
	})
}

// NewDefaultNacosConfigClient returns a NacosConfigClient that authenticates
// via p and shares the process-wide client-builder cache.
func NewDefaultNacosConfigClient(p auth.NacosAuthProvider) client.NacosConfigClient {
	return &DefaultNacosConfigClient{
		authProvider:  p,
		clientBuilder: GetNacosClientBuilder(),
	}
}
--------------------------------------------------------------------------------
/api/v1/dynamicconfiguration_webhook.go:
--------------------------------------------------------------------------------
/*
Copyright 2023.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | "k8s.io/apimachinery/pkg/api/errors" 21 | "k8s.io/apimachinery/pkg/runtime" 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "k8s.io/apimachinery/pkg/util/validation/field" 24 | ctrl "sigs.k8s.io/controller-runtime" 25 | logf "sigs.k8s.io/controller-runtime/pkg/log" 26 | "sigs.k8s.io/controller-runtime/pkg/webhook" 27 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 28 | ) 29 | 30 | // log is for logging in this package. 31 | var dynamicconfigurationlog = logf.Log.WithName("dynamicconfiguration-resource") 32 | var ( 33 | SecretGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"} 34 | ConfigMapGVK = schema.GroupVersionKind{Group: "", Version: "v1", Kind: "ConfigMap"} 35 | ) 36 | 37 | func (r *DynamicConfiguration) SetupWebhookWithManager(mgr ctrl.Manager) error { 38 | return ctrl.NewWebhookManagedBy(mgr). 39 | For(r). 40 | Complete() 41 | } 42 | 43 | // TODO(user): EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 
44 | 45 | //+kubebuilder:webhook:path=/mutate-nacos-io-v1-dynamicconfiguration,mutating=true,failurePolicy=fail,sideEffects=None,groups=nacos.io,resources=dynamicconfigurations,verbs=create;update,versions=v1,name=mdynamicconfiguration.kb.io,admissionReviewVersions=v1 46 | 47 | var _ webhook.Defaulter = &DynamicConfiguration{} 48 | 49 | // Default implements webhook.Defaulter so a webhook will be registered for the type 50 | func (r *DynamicConfiguration) Default() { 51 | dynamicconfigurationlog.Info("default", "name", r.Name) 52 | 53 | if r.Spec.Strategy.SyncScope == "" { 54 | if r.Spec.ObjectRefs != nil { 55 | r.Spec.Strategy.SyncScope = SyncScopePartial 56 | } else { 57 | r.Spec.Strategy.SyncScope = SyncScopeFull 58 | } 59 | } 60 | 61 | if r.Spec.Strategy.ConflictPolicy == "" { 62 | r.Spec.Strategy.ConflictPolicy = PreferCluster 63 | } 64 | } 65 | 66 | // TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 67 | //+kubebuilder:webhook:path=/validate-nacos-io-v1-dynamicconfiguration,mutating=false,failurePolicy=fail,sideEffects=None,groups=nacos.io,resources=dynamicconfigurations,verbs=create;update,versions=v1,name=vdynamicconfiguration.kb.io,admissionReviewVersions=v1 68 | 69 | var _ webhook.Validator = &DynamicConfiguration{} 70 | 71 | // ValidateCreate implements webhook.Validator so a webhook will be registered for the type 72 | func (r *DynamicConfiguration) ValidateCreate() (admission.Warnings, error) { 73 | dynamicconfigurationlog.Info("validate create", "name", r.Name) 74 | 75 | // TODO(user): fill in your validation logic upon object creation. 
76 | return nil, r.validateDC() 77 | } 78 | 79 | // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type 80 | func (r *DynamicConfiguration) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { 81 | dynamicconfigurationlog.Info("validate update", "name", r.Name) 82 | 83 | // TODO(user): fill in your validation logic upon object update. 84 | return nil, r.validateDC() 85 | } 86 | 87 | // ValidateDelete implements webhook.Validator so a webhook will be registered for the type 88 | func (r *DynamicConfiguration) ValidateDelete() (admission.Warnings, error) { 89 | dynamicconfigurationlog.Info("validate delete", "name", r.Name) 90 | 91 | // TODO(user): fill in your validation logic upon object deletion. 92 | return nil, nil 93 | } 94 | 95 | func (r *DynamicConfiguration) validateDC() error { 96 | var allErrs field.ErrorList 97 | if err := r.validateNacosServerConfiguration(); err != nil { 98 | allErrs = append(allErrs, err) 99 | } 100 | if err := r.validateSyncStrategy(); err != nil { 101 | allErrs = append(allErrs, err) 102 | } 103 | if err := r.validateObjectRefs(); err != nil { 104 | allErrs = append(allErrs, err) 105 | } 106 | if len(allErrs) == 0 { 107 | return nil 108 | } 109 | return errors.NewInvalid( 110 | schema.GroupKind{Group: "nacos.io", Kind: "DynamicConfiguration"}, 111 | r.Name, 112 | allErrs) 113 | } 114 | 115 | func (r *DynamicConfiguration) validateNacosServerConfiguration() *field.Error { 116 | serverAddrEmpty := len(r.Spec.NacosServer.ServerAddr) == 0 117 | endpoint := len(r.Spec.NacosServer.Endpoint) == 0 118 | if serverAddrEmpty && endpoint { 119 | return field.Required(field.NewPath("spec").Child("nacosServer"), "either ServerAddr or Endpoint should be set") 120 | } 121 | if len(r.Spec.NacosServer.Namespace) == 0 { 122 | return field.Required(field.NewPath("spec").Child("nacosServer").Child("namespace"), "nacos namespace should be set") 123 | } 124 | if r.Spec.NacosServer.AuthRef != nil { 125 | 
supportGVKs := []string{SecretGVK.String()} 126 | gvk := r.Spec.NacosServer.AuthRef.GroupVersionKind().String() 127 | if !stringsContains(supportGVKs, gvk) { 128 | return field.NotSupported( 129 | field.NewPath("spec").Child("nacosServer").Child("authRef"), 130 | r.Spec.NacosServer.AuthRef, 131 | supportGVKs) 132 | } 133 | } 134 | return nil 135 | } 136 | 137 | func (r *DynamicConfiguration) validateObjectRefs() *field.Error { 138 | if r.Spec.Strategy.SyncScope == SyncScopeFull { 139 | if r.Spec.ObjectRefs != nil { 140 | return field.Invalid(field.NewPath("spec").Child("objectRefs"), r.Spec.ObjectRefs, "ObjectRefs should be empty when SyncStrategy is full") 141 | } else { 142 | return nil 143 | } 144 | } 145 | if r.Spec.ObjectRefs == nil { 146 | return field.Required(field.NewPath("spec").Child("objectRefs"), "ObjectRefs should be set when SyncStrategy is partial") 147 | } else { 148 | supportGVKs := []string{ConfigMapGVK.String(), SecretGVK.String()} 149 | for _, objRef := range r.Spec.ObjectRefs { 150 | gvk := objRef.GroupVersionKind().String() 151 | if !stringsContains(supportGVKs, gvk) { 152 | return field.NotSupported( 153 | field.NewPath("spec").Child("objectRefs"), 154 | objRef, 155 | supportGVKs) 156 | } 157 | } 158 | } 159 | return nil 160 | } 161 | 162 | func (r *DynamicConfiguration) validateSyncStrategy() *field.Error { 163 | syncScopeSupportList := []string{string(SyncScopePartial), string(SyncScopeFull)} 164 | if !stringsContains(syncScopeSupportList, string(r.Spec.Strategy.SyncScope)) { 165 | return field.NotSupported( 166 | field.NewPath("spec").Child("strategy").Child("syncDirection"), 167 | r.Spec.Strategy.SyncScope, 168 | syncScopeSupportList) 169 | } 170 | 171 | conflictPolicySupportList := []string{string(PreferCluster), string(PreferServer)} 172 | if !stringsContains(conflictPolicySupportList, string(r.Spec.Strategy.ConflictPolicy)) { 173 | return field.NotSupported( 174 | field.NewPath("spec").Child("strategy").Child("conflictPolicy"), 175 | 
r.Spec.Strategy.ConflictPolicy, 176 | conflictPolicySupportList) 177 | } 178 | return nil 179 | } 180 | 181 | func stringsContains(arr []string, item string) bool { 182 | if len(arr) == 0 { 183 | return false 184 | } 185 | for _, v := range arr { 186 | if v == item { 187 | return true 188 | } 189 | } 190 | return false 191 | } 192 | -------------------------------------------------------------------------------- /README_CN.md: -------------------------------------------------------------------------------- 1 | # Nacos Controller 2 | 本项目包含一系列Kubernetes自定义资源(CustomResourceDefinition)以及相关控制器实现。 3 | 4 | 当前版本定义CRD如下: 5 | - DynamicConfiguration:Nacos配置与Kubernetes配置的同步桥梁 6 | - ServiceDiscovery::Nacos服务发现与Kubernetes服务发现同步桥梁 7 | 8 | [English Document](./README.md) 9 | 10 | # 快速开始 11 | ## 部署Nacos Controller 12 | 1. 安装helm,参考[文档](https://helm.sh/docs/intro/install/) 13 | 2. 安装Nacos Controller 14 | ```bash 15 | git clone https://github.com/nacos-group/nacos-controller.git 16 | cd nacos-controller/charts/nacos-controller 17 | 18 | export KUBECONFIG=/你的K8s集群/访问凭证/文件路径 19 | kubectl create ns nacos 20 | helm install -n nacos nacos-controller . 
```
## Nacos和K8s集群配置同步
### 快速开始
通过以下的命令,你可以快速将kubernetes集群中当前命名空间的配置全量同步到Nacos中。
```bash
cd nacos-controller
chmod +x configQuickStart.sh
./configQuickStart.sh
```
除此之外,你还可以根据自己的需要编写DynamicConfiguration yaml文件,并部署到K8s集群中。

Nacos Controller 2.0 支持Kubernetes集群配置和Nacos 配置的双向同步,支持将Kubernetes集群特定命名空间下的ConfigMap以及Secret同步到Nacos指定命名空间下中。用户可以通过Nacos实现对于Kubernetes集群配置的动态修改和管理。Nacos配置和Kubernetes配置的映射关系如下表所示:

| ConfigMap/Secret | Nacos Config |
|------------------|-----------------|
| Namespace | 用户指定的命名空间 |
| Name | Group |
| Key | DataId |
| Value | Content |

目前主要支持两种配置同步的策略:
- 全量同步:Kubernetes集群特定命名空间下的所有ConfigMap以及Secret自动同步至Nacos,Nacos Controller会自动同步所有新建的ConfigMap和Secret
- 部分同步:只同步用户指定的ConfigMap和Secret至Nacos

### K8s集群命名空间配置全量同步Nacos
编写DynamicConfiguration yaml文件:
```yaml
apiVersion: nacos.io/v1
kind: DynamicConfiguration
metadata:
  name: dc-demo
spec:
  nacosServer:
    # endpoint: nacos地址服务器,与serverAddr互斥,优先级高于serverAddr,与serverAddr二选一即可
    endpoint:
    # serverAddr: nacos地址,与endpoint二选一即可
    serverAddr:
    # namespace: 用户指定的命名空间
    namespace:
    # authRef: 引用存放Nacos 客户端鉴权信息的Secret,支持用户名/密码 和 AK/SK, Nacos服务端未开启鉴权可忽略
    authRef:
      apiVersion: v1
      kind: Secret
      name: nacos-auth
  strategy:
    # scope: 同步策略,full 表示全量同步,partial 表示部分同步
    scope: full
    # 是否同步配置删除操作
    syncDeletion: true
    # conflictPolicy: 同步冲突策略,preferCluster 表示初次同步内容冲突时以Kubernetes集群配置为准,preferServer 表示以Nacos配置为准
    conflictPolicy: preferCluster
---
apiVersion: v1
kind: Secret
metadata:
  name: nacos-auth
data:
  accessKey:
  secretKey:
  username:
  password:
```
执行命令部署DynamicConfiguration到需要全量同步的Kubernetes集群命名空间下:
```bash
kubectl apply -f dc-demo.yaml -n <namespace>
```
即可实现配置的全量同步
### K8s集群命名空间配置部分同步Nacos
编写DynamicConfiguration
yaml文件,和全量同步的区别主要在于strategy部分,并且要指定需要同步的ConfigMap和Secret: 90 | ```yaml 91 | apiVersion: nacos.io/v1 92 | kind: DynamicConfiguration 93 | metadata: 94 | name: dc-demo 95 | spec: 96 | nacosServer: 97 | # endpoint: nacos地址服务器,与serverAddr互斥,优先级高于serverAddr,与serverAddr二选一即可 98 | endpoint: 99 | # serverAddr: nacos地址,与endpoint二选一即可 100 | serverAddr: 101 | # namespace: 用户指定的命名空间 102 | namespace: 103 | # authRef: 引用存放Nacos 客户端鉴权信息的Secret,支持用户名/密码 和 AK/SK, Nacos服务端未开启鉴权可忽略 104 | authRef: 105 | apiVersion: v1 106 | kind: Secret 107 | name: nacos-auth 108 | strategy: 109 | # scope: 同步策略,full 表示全量同步,partial 表示部分同步 110 | scope: partial 111 | # 是否同步配置删除操作 112 | syncDeletion: true 113 | # conflictPolicy: 同步冲突策略,preferCluster 表示初次同步内容冲突时以Kubernetes集群配置为准,preferServer 表示以Nacos配置为准 114 | conflictPolicy: preferCluster 115 | # 需要同步的ConfigMap和Secret 116 | objectRefs: 117 | - apiVersion: v1 118 | kind: ConfigMap 119 | name: nacos-config-cm 120 | - apiVersion: v1 121 | kind: Secret 122 | name: nacos-config-secret 123 | --- 124 | apiVersion: v1 125 | kind: Secret 126 | metadata: 127 | name: nacos-auth 128 | data: 129 | accessKey: 130 | secretKey: 131 | username: 132 | password: 133 | ``` 134 | 执行命令部署DynamicConfiguration到需要全量同步的Kubernetes集群命名空间下: 135 | ```bash 136 | kubectl apply -f dc-demo.yaml -n 137 | ``` 138 | 即可实现配置的部分同步 139 | 140 | ### NacosServer配置 141 | 字段说明: 142 | - endpoint: nacos地址服务器,与serverAddr互斥,优先级高于serverAddr 143 | - serverAddr: nacos地址,与endpoint互斥 144 | - namespace: nacos空间ID 145 | - group: nacos分组 146 | - authRef: 引用存放Nacos 客户端鉴权信息的资源,支持用户名/密码 和 AK/SK 147 | ```yaml 148 | nacosServer: 149 | endpoint: 150 | serverAddr: 151 | namespace: 152 | group: 153 | authRef: 154 | apiVersion: v1 155 | kind: Secret 156 | name: nacos-auth 157 | ``` 158 | ## Nacos与K8s集群服务同步 159 | Nacos Controller 2.0 支持Kubernetes集群服务同步到Nacos,支持将Kubernetes集群特定命名空间下的Service同步到Nacos指定命名空间下。用户可以通过Nacos实现实现对Kubernetes服务的服务发现。Nacos服务和Kubernetes服务的映射关系如下表所示: 160 | 161 | | Kubernetes Service | Nacos Service | 162 
| |------------------|-----------------| 163 | | Namespace | 用户指定的命名空间 | 164 | | Name | serviceName | 165 | | Endpoint | instance | 166 | 167 | 目前主要支持两种服务同步的策略: 168 | - 全量同步:Kubernetes集群特定命名空间下的所有Service自动同步至Nacos 169 | - 部分同步:只同步用户指定的Service至Nacos 170 | ### Kubernetes集群服务全量同步Nacos 171 | 编写ServiceDiscovery yaml文件: 172 | ```yaml 173 | apiVersion: nacos.io/v1 174 | kind: ServiceDiscovery 175 | metadata: 176 | name: sd-demo 177 | spec: 178 | nacosServer: 179 | # serverAddr: nacos地址 180 | serverAddr: 181 | # namespace: 用户指定的命名空间 182 | namespace: 183 | # authRef: 引用存放Nacos 客户端鉴权信息的Secret,支持用户名/密码 和 AK/SK, Nacos服务端未开启鉴权可忽略 184 | authRef: 185 | apiVersion: v1 186 | kind: Secret 187 | name: nacos-auth 188 | --- 189 | apiVersion: v1 190 | kind: Secret 191 | metadata: 192 | name: nacos-auth 193 | data: 194 | accessKey: 195 | secretKey: 196 | username: 197 | password: 198 | ``` 199 | 执行命令部署ServiceDiscovery到需要全量同步的Kubenetes集群命名空间下: 200 | ```bash 201 | kubectl apply -f sd-demo.yaml -n 202 | ``` 203 | 204 | ### Kubernetes集群服务部分同步Nacos 205 | 编写ServiceDiscovery yaml文件,和全量同步的区别在于需要指定需要同步的Service: 206 | ```yaml 207 | apiVersion: nacos.io/v1 208 | kind: ServiceDiscovery 209 | metadata: 210 | name: sd-demo 211 | spec: 212 | nacosServer: 213 | # serverAddr: nacos地址 214 | serverAddr: 215 | # namespace: 用户指定的命名空间 216 | namespace: 217 | # authRef: 引用存放Nacos 客户端鉴权信息的Secret,支持用户名/密码 和 AK/SK, Nacos服务端未开启鉴权可忽略 218 | authRef: 219 | apiVersion: v1 220 | kind: Secret 221 | name: nacos-auth 222 | # 需要同步的Service 223 | services: [svc1,svc2] 224 | --- 225 | apiVersion: v1 226 | kind: Secret 227 | metadata: 228 | name: nacos-auth 229 | data: 230 | accessKey: 231 | secretKey: 232 | username: 233 | password: 234 | ``` 235 | 执行命令部署ServiceDiscovery到需要同步的Kubenetes集群命名空间下: 236 | ```bash 237 | kubectl apply -f sd-demo.yaml -n 238 | ``` 239 | 即可实现服务的部分同步 240 | 241 | ## 贡献者 242 | 特别感谢以下人员/团队对本项目的贡献 243 | 244 | - 阿里云[EDAS](https://www.aliyun.com/product/edas)团队(项目孵化来源) 245 | - 
阿里云[MSE](https://www.aliyun.com/product/aliware/mse)团队 246 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | # Image URL to use all building/pushing image targets 3 | IMG ?= controller:latest 4 | # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. 5 | ENVTEST_K8S_VERSION = 1.27.1 6 | 7 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 8 | ifeq (,$(shell go env GOBIN)) 9 | GOBIN=$(shell go env GOPATH)/bin 10 | else 11 | GOBIN=$(shell go env GOBIN) 12 | endif 13 | 14 | # CONTAINER_TOOL defines the container tool to be used for building images. 15 | # Be aware that the target commands are only tested with Docker which is 16 | # scaffolded by default. However, you might want to replace it to use other 17 | # tools. (i.e. podman) 18 | CONTAINER_TOOL ?= docker 19 | 20 | # Setting SHELL to bash allows bash commands to be executed by recipes. 21 | # Options are set to exit when a recipe line exits non-zero or a piped command fails. 22 | SHELL = /usr/bin/env bash -o pipefail 23 | .SHELLFLAGS = -ec 24 | 25 | .PHONY: all 26 | all: build 27 | 28 | ##@ General 29 | 30 | # The help target prints out all targets with their descriptions organized 31 | # beneath their categories. The categories are represented by '##@' and the 32 | # target descriptions by '##'. The awk commands is responsible for reading the 33 | # entire set of makefiles included in this invocation, looking for lines of the 34 | # file as xyz: ## something, and then pretty-format the target and help. Then, 35 | # if there's a line with ##@ something, that gets pretty-printed as a category. 
36 | # More info on the usage of ANSI control characters for terminal formatting: 37 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 38 | # More info on the awk command: 39 | # http://linuxcommand.org/lc3_adv_awk.php 40 | 41 | .PHONY: help 42 | help: ## Display this help. 43 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 44 | 45 | ##@ Development 46 | 47 | .PHONY: manifests 48 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. 49 | $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases 50 | 51 | .PHONY: generate 52 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 53 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..." 54 | 55 | .PHONY: fmt 56 | fmt: ## Run go fmt against code. 57 | go fmt ./... 58 | 59 | .PHONY: vet 60 | vet: ## Run go vet against code. 61 | go vet ./... 62 | 63 | .PHONY: test 64 | test: manifests generate fmt vet envtest ## Run tests. 65 | KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out 66 | 67 | ##@ Build 68 | 69 | .PHONY: build 70 | build: manifests generate fmt vet ## Build manager binary. 71 | go build -o bin/manager cmd/main.go 72 | 73 | .PHONY: run 74 | run: manifests generate fmt vet ## Run a controller from your host. 75 | go run ./cmd/main.go 76 | 77 | # If you wish built the manager image targeting other platforms you can use the --platform flag. 78 | # (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. 
79 | # More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 80 | .PHONY: docker-build 81 | docker-build: test ## Build docker image with the manager. 82 | $(CONTAINER_TOOL) build -t ${IMG} . 83 | 84 | .PHONY: docker-push 85 | docker-push: ## Push docker image with the manager. 86 | $(CONTAINER_TOOL) push ${IMG} 87 | 88 | # PLATFORMS defines the target platforms for the manager image be build to provide support to multiple 89 | # architectures. (i.e. make docker-buildx IMG=myregistry/mypoperator:0.0.1). To use this option you need to: 90 | # - able to use docker buildx . More info: https://docs.docker.com/build/buildx/ 91 | # - have enable BuildKit, More info: https://docs.docker.com/develop/develop-images/build_enhancements/ 92 | # - be able to push the image for your registry (i.e. if you do not inform a valid value via IMG=> then the export will fail) 93 | # To properly provided solutions that supports more than one platform you should use this option. 94 | PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le 95 | .PHONY: docker-buildx 96 | docker-buildx: test ## Build and push docker image for the manager for cross-platform support 97 | # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile 98 | sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross 99 | - $(CONTAINER_TOOL) buildx create --name project-v3-builder 100 | $(CONTAINER_TOOL) buildx use project-v3-builder 101 | - $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross . 
102 | - $(CONTAINER_TOOL) buildx rm project-v3-builder 103 | rm Dockerfile.cross 104 | 105 | ##@ Deployment 106 | 107 | ifndef ignore-not-found 108 | ignore-not-found = false 109 | endif 110 | 111 | .PHONY: install 112 | install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 113 | $(KUSTOMIZE) build config/crd | $(KUBECTL) apply -f - 114 | 115 | .PHONY: uninstall 116 | uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 117 | $(KUSTOMIZE) build config/crd | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - 118 | 119 | .PHONY: deploy 120 | deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config. 121 | cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG} 122 | $(KUSTOMIZE) build config/default | $(KUBECTL) apply -f - 123 | 124 | .PHONY: undeploy 125 | undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion. 126 | $(KUSTOMIZE) build config/default | $(KUBECTL) delete --ignore-not-found=$(ignore-not-found) -f - 127 | 128 | ##@ Build Dependencies 129 | 130 | ## Location to install dependencies to 131 | LOCALBIN ?= $(shell pwd)/bin 132 | $(LOCALBIN): 133 | mkdir -p $(LOCALBIN) 134 | 135 | ## Tool Binaries 136 | KUBECTL ?= kubectl 137 | KUSTOMIZE ?= $(LOCALBIN)/kustomize 138 | CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen 139 | ENVTEST ?= $(LOCALBIN)/setup-envtest 140 | 141 | ## Tool Versions 142 | KUSTOMIZE_VERSION ?= v5.0.1 143 | CONTROLLER_TOOLS_VERSION ?= v0.16.5 144 | 145 | .PHONY: kustomize 146 | kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary. If wrong version is installed, it will be removed before downloading. 147 | $(KUSTOMIZE): $(LOCALBIN) 148 | @if test -x $(LOCALBIN)/kustomize && ! 
$(LOCALBIN)/kustomize version | grep -q $(KUSTOMIZE_VERSION); then \ 149 | echo "$(LOCALBIN)/kustomize version is not expected $(KUSTOMIZE_VERSION). Removing it before installing."; \ 150 | rm -rf $(LOCALBIN)/kustomize; \ 151 | fi 152 | test -s $(LOCALBIN)/kustomize || GOBIN=$(LOCALBIN) GO111MODULE=on go install sigs.k8s.io/kustomize/kustomize/v5@$(KUSTOMIZE_VERSION) 153 | 154 | .PHONY: controller-gen 155 | controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary. If wrong version is installed, it will be overwritten. 156 | $(CONTROLLER_GEN): $(LOCALBIN) 157 | test -s $(LOCALBIN)/controller-gen && $(LOCALBIN)/controller-gen --version | grep -q $(CONTROLLER_TOOLS_VERSION) || \ 158 | GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION) 159 | 160 | .PHONY: envtest 161 | envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. 162 | $(ENVTEST): $(LOCALBIN) 163 | test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest 164 | -------------------------------------------------------------------------------- /pkg/controller/servicediscovery_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2023. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
*/

package controller

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"sync"
	"sync/atomic"

	nacosiov1 "github.com/nacos-group/nacos-controller/api/v1"
	"github.com/nacos-group/nacos-controller/pkg"
	"github.com/nacos-group/nacos-controller/pkg/nacos"
	"github.com/nacos-group/nacos-controller/pkg/nacos/auth"
	"github.com/nacos-group/nacos-controller/pkg/nacos/naming"
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// ServiceDiscoveryReconciler reconciles a ServiceDiscovery object
type ServiceDiscoveryReconciler struct {
	client.Client
	Scheme              *runtime.Scheme
	AuthProvider        *auth.NacosAuthProvider
	ServiceDiscoveryMap sync.Map
}

// NOTE(review): the rbac group below is "nacos.io.nacos.io" — looks doubled
// compared to the webhook markers which use plain "nacos.io"; confirm against
// the generated ClusterRole.
//+kubebuilder:rbac:groups=nacos.io.nacos.io,resources=servicediscoveries,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=nacos.io.nacos.io,resources=servicediscoveries/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=nacos.io.nacos.io,resources=servicediscoveries/finalizers,verbs=update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the ServiceDiscovery object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/reconcile

// NewServiceDiscoveryReconciler builds a reconciler around the given client.
// NOTE(review): the cs (*kubernetes.Clientset) parameter is accepted but never
// used, and AuthProvider is left nil — confirm whether that is intentional.
func NewServiceDiscoveryReconciler(c client.Client, cs *kubernetes.Clientset) *ServiceDiscoveryReconciler {
	return &ServiceDiscoveryReconciler{
		Client: c,
		Scheme: c.Scheme(),
	}
}

// syncEndponits registers each Endpoints' backing Service (and its addresses)
// with Nacos via nClient, then records a per-service sync status on sd and
// updates sd's status subresource once per service.
// NOTE(review): "Endponits" is a typo for "Endpoints" (rename would touch all
// call sites); also Status().Update inside the loop issues one API write per
// service and can hit resourceVersion conflicts — consider batching.
func (r *ServiceDiscoveryReconciler) syncEndponits(ctx context.Context, endpointsList []v1.Endpoints, nClient *naming.NacosNamingClient, sd *nacosiov1.ServiceDiscovery) error {
	for _, endpoints := range endpointsList {
		// Resolve the Service that owns this Endpoints object (same namespace/name).
		svc := &v1.Service{}
		if err := r.Client.Get(ctx, client.ObjectKey{Namespace: endpoints.Namespace, Name: endpoints.Name}, svc); err != nil {
			return err
		}

		svcInfo, err := naming.GenerateServiceInfo(svc)
		if err != nil {
			return err
		}
		// Tag the Nacos service with its Kubernetes origin for traceability.
		svcInfo.Metadata["k8s.name"] = sd.Name
		svcInfo.Metadata["k8s.namespace"] = sd.Namespace
		svcInfo.Metadata["k8s.cluster"] = pkg.CurrentContext

		if !nClient.RegisterServiceInstances(svcInfo, naming.ConvertToAddresses(&endpoints, svcInfo)) {
			return errors.New("register service instances failed, service: " + svcInfo.ServiceName)
		}

		status := nacosiov1.ServiceSyncStatus{
			ServiceName:  svcInfo.ServiceName,
			GroupName:    svcInfo.Group,
			Ready:        true,
			LastSyncTime: metav1.Now(),
		}

		if sd.Status.SyncStatuses == nil {
			sd.Status.SyncStatuses = make(map[string]*nacosiov1.ServiceSyncStatus)
		}

		sd.Status.SyncStatuses[svcInfo.ServiceKey.String()] = &status

		if err := r.Client.Status().Update(ctx, sd); err != nil {
			return err
		}
	}
	return nil

}

// syncExistedService performs the initial sync of services that already exist
// when a ServiceDiscovery is first seen: either the explicitly listed
// sd.Spec.Services, or (when the list is empty) every Endpoints in sd's
// namespace.
func (r *ServiceDiscoveryReconciler) syncExistedService(ctx context.Context, sd *nacosiov1.ServiceDiscovery) error {
	fmt.Println("ServiceDiscovery Reconcile, syncExistedService start")
	toBeSynced := make([]v1.Endpoints, 0)
	authProvider := auth.NewDefaultNacosAuthProvider(r.Client)
	nClient, err := naming.GetNacosNamingClientBuilder().BuildNamingClient(authProvider, sd)

	if err != nil {
		return err
	}

	if len(sd.Spec.Services) > 0 {
		// Partial sync: only the named services. A missing Endpoints aborts
		// the whole sync here (unlike Reconcile, which skips NotFound).
		for _, svcName := range sd.Spec.Services {
			endpoints := v1.Endpoints{}
			if err := r.Client.Get(ctx, client.ObjectKey{Namespace: sd.Namespace, Name: svcName}, &endpoints); err != nil {
				return err
			}
			toBeSynced = append(toBeSynced, endpoints)
		}

	} else {
		// Full sync: every Endpoints in the namespace.
		endpointsList := &v1.EndpointsList{}
		if err := r.Client.List(ctx, endpointsList, &client.ListOptions{Namespace: sd.Namespace}); err != nil {
			return err
		}
		toBeSynced = append(toBeSynced, endpointsList.Items...)
	}

	if err := r.syncEndponits(ctx, toBeSynced, nClient, sd); err != nil {
		return err
	}

	fmt.Println("ServiceDiscovery Reconcile, syncExistedService success")

	return nil
}

// Reconcile handles three cases in order: first sight (add finalizer and sync
// existing services), deletion (unregister from Nacos and drop the
// finalizer), and ordinary updates (re-sync the listed services).
func (r *ServiceDiscoveryReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	_ = log.FromContext(ctx)
	log.Log.Info("ServiceDiscovery Reconcile", "req ", req.NamespacedName)
	sd := &nacosiov1.ServiceDiscovery{}
	fmt.Println("ServiceDiscovery Reconcile, namespace:", req.Namespace+", name: "+req.Name)
	err := r.Get(ctx, client.ObjectKey{Namespace: req.Namespace, Name: req.Name}, sd)
	if err != nil {
		// NOTE(review): every Get error is treated as "object deleted" and
		// swallowed; transient API errors are never retried. Consider
		// apierrors.IsNotFound(err) and returning other errors.
		fmt.Println("ServiceDiscoveryReconciler: Service deleted:", req.NamespacedName, err)
		return ctrl.Result{}, nil
	}

	// New resource: add the finalizer and sync pre-existing services.
	// NOTE(review): this branch runs before the DeletionTimestamp check, so an
	// object that is already terminating but lacks the finalizer would be
	// re-synced here — confirm ordering is intended.
	if !nacos.StringSliceContains(sd.Finalizers, "nacos.io/sd-finalizer") {
		// Ensure the finalizer is present before doing any external work.
		if err := r.ensureFinalizer(ctx, sd); err != nil {
			return ctrl.Result{}, err
		}

		// Sync services that already exist in the namespace.
		return ctrl.Result{}, r.syncExistedService(ctx, sd)

	}

	// Resource is being deleted: clean up the services it registered.
	if !sd.DeletionTimestamp.IsZero() {
		fmt.Println("ServiceDiscovery deleted:", req.NamespacedName)
		if err := r.doFinalization(ctx, sd); err != nil {
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil
	}

	// Update path: gather the Endpoints of the explicitly listed services.
	// Missing ones are skipped (NotFound tolerated), unlike the initial sync.
	endpointsList := make([]v1.Endpoints, 0)
	if len(sd.Spec.Services) > 0 {
		for _, svc := range sd.Spec.Services {
			endpoints := v1.Endpoints{}
			err := r.Client.Get(ctx, client.ObjectKey{Namespace: sd.Namespace, Name: svc}, &endpoints)

			if err != nil {
				if apierrors.IsNotFound(err) {
					continue
				} else {
					return ctrl.Result{}, err
				}
			}

			endpointsList = append(endpointsList, endpoints)
		}
	}

	authProvider := auth.NewDefaultNacosAuthProvider(r.Client)
	nClient, err := naming.GetNacosNamingClientBuilder().BuildNamingClient(authProvider, sd)

	if err != nil {
		return ctrl.Result{}, err
	}
	if err := r.syncEndponits(ctx, endpointsList, nClient, sd); err != nil {
		return ctrl.Result{}, err
	}

	return ctrl.Result{}, nil
}

// doFinalization unregisters from Nacos every service this controller marked
// Ready in sd.Status, then removes the finalizer so deletion can proceed.
func (r *ServiceDiscoveryReconciler) doFinalization(ctx context.Context, sd *nacosiov1.ServiceDiscovery) error {
	fmt.Println("ServiceDiscovery Reconcile, doFinalization start")
	authProvider := auth.NewDefaultNacosAuthProvider(r.Client)
	nClient, err := naming.GetNacosNamingClientBuilder().BuildNamingClient(authProvider, sd)

	if err != nil {
		return err
	}

	// NOTE(review): succ is set but never read after this point.
	var succ atomic.Bool
	succ.Store(true)

	for _, svcStatus := range sd.Status.SyncStatuses {
		if !svcStatus.Ready {
			continue
		}

		// Recorded names may be in Nacos "group@@service" form; keep only the
		// part before "@@" (NOTE(review): keeps the group, not the service —
		// verify against how ServiceName is recorded in syncEndponits).
		svcName := svcStatus.ServiceName
		if strings.Contains(svcName, "@@") {
			svcName = strings.Split(svcName, "@@")[0]
		}

		groupName := svcStatus.GroupName

		if groupName == "" {
			groupName = naming.NamingDefaultGroupName
		}

		log.Log.Info("Sync instance to nacos1", "service", svcName)
		if !nClient.UnregisterService(naming.ServiceInfo{
			ServiceKey: naming.ServiceKey{
				Group:       groupName,
				ServiceName: svcName,
			},
		}) {
			return errors.New("unregister service fail, serviceName: " + svcStatus.ServiceName)
		} else {
			log.Log.Info("Sync instance to nacos2", "service", svcStatus.ServiceName)
		}
	}

	if err := r.removeFinalizer(ctx, sd); err != nil {
		return err
	}

	return nil
}

// ensureFinalizer adds the "nacos.io/sd-finalizer" finalizer to obj (and
// persists it) if not already present.
func (r *ServiceDiscoveryReconciler) ensureFinalizer(ctx context.Context, obj client.Object) error {
	if !nacos.StringSliceContains(obj.GetFinalizers(), "nacos.io/sd-finalizer") {
		obj.SetFinalizers(append(obj.GetFinalizers(), "nacos.io/sd-finalizer"))
		if err := r.Client.Update(ctx, obj); err != nil {
			return err
		}
	}

	return nil
}

// removeFinalizer removes "nacos.io/sd-finalizer" from obj and persists the
// change. NOTE(review): the slice is mutated while being ranged over — fine
// for a single occurrence, but duplicates could be skipped; controllerutil's
// RemoveFinalizer would be the robust replacement.
func (r *ServiceDiscoveryReconciler) removeFinalizer(ctx context.Context, obj client.Object) error {
	if nacos.StringSliceContains(obj.GetFinalizers(), "nacos.io/sd-finalizer") {
		finalizers := obj.GetFinalizers()
		for i, v := range finalizers {
			if v == "nacos.io/sd-finalizer" {
				finalizers = append(finalizers[:i], finalizers[i+1:]...)
			}
		}

		obj.SetFinalizers(finalizers)
		if err := r.Client.Update(ctx, obj); err != nil {
			return err
		}
	}

	return nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *ServiceDiscoveryReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&nacosiov1.ServiceDiscovery{}).
292 | Complete(r) 293 | } 294 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Nacos Controller 2 | 3 | This project includes a series of Kubernetes custom resources (CustomResourceDefinition) and their related controller implementations. 4 | The current version defines CRDs as follows: 5 | 6 | - DynamicConfiguration: Synchronization bridge between Nacos configuration and Kubernetes configuration. 7 | 8 | [中文文档](./README_CN.md) 9 | 10 | # Quick Start 11 | ## Deploy Nacos Controller 12 | 1. Install helm,see [document](https://helm.sh/docs/intro/install/) 13 | 2. Install Nacos Controller 14 | ```bash 15 | git clone https://github.com/nacos-group/nacos-controller.git 16 | cd nacos-controller/charts/nacos-controller 17 | 18 | export KUBECONFIG=/path/to/your/kubeconfig/file 19 | kubectl create ns nacos 20 | helm install -n nacos nacos-controller . 21 | ``` 22 | 23 | ## Configuration Synchronization Between Nacos and Kubernetes Clusters 24 | ### Quick start 25 | By using the following command, you can quickly synchronize all configurations of the current namespace in the Kubernetes cluster to Nacos. 26 | ```bash 27 | cd nacos-controller 28 | chmod +x configQuicStart.sh 29 | ./configQuicStart.sh 30 | ``` 31 | In addition, you can write your own DynamicConfiguration YAML file according to your needs and deploy it to the K8s cluster. 32 | 33 | Nacos Controller 2.0 supports bidirectional synchronization between Kubernetes cluster configurations and Nacos configurations. It can synchronize ConfigMaps and Secrets from specific Kubernetes namespaces to specified Nacos namespaces. Users can dynamically modify and manage Kubernetes cluster configurations through Nacos. 
The mapping relationship is as follows: 34 | 35 | | ConfigMap/Secret | Nacos Config | 36 | |------------------|-----------------| 37 | | Namespace | User-specified namespace | 38 | | Name | Group | 39 | | Key | DataId | 40 | | Value | Content | 41 | 42 | Currently supported synchronization strategies: 43 | - Full synchronization: Automatically synchronizes all ConfigMaps and Secrets from specific Kubernetes namespaces to Nacos. Nacos Controller will auto-sync newly created ConfigMaps/Secrets 44 | - Partial synchronization: Only synchronizes user-specified ConfigMaps and Secrets to Nacos 45 | 46 | ### Full Synchronization from K8s Namespace to Nacos 47 | Create DynamicConfiguration YAML: 48 | ```yaml 49 | apiVersion: nacos.io/v1 50 | kind: DynamicConfiguration 51 | metadata: 52 | name: dc-demo 53 | spec: 54 | nacosServer: 55 | # endpoint: the address server of nacos server, conflict with serverAddr field, and higher priority than serverAddr field 56 | endpoint: 57 | # serverAddr: the address of nacos server, conflict with endpoint field 58 | serverAddr: 59 | # namespace: Target Nacos namespace 60 | namespace: 61 | # authRef: Reference to the Secret that stores the Nacos client authentication information, supporting both username/password and Access Key/Secret Key. If the Nacos server does not have authentication enabled, this can be ignored. 62 | authRef: 63 | apiVersion: v1 64 | kind: Secret 65 | name: nacos-auth 66 | strategy: 67 | # scope: Synchronization strategy, where "full" indicates full synchronization and "partial" indicates partial synchronization. 68 | scope: full 69 | # Whether to synchronize configuration deletion operations 70 | syncDeletion: true 71 | # conflictPolicy: Synchronization conflict policy. "preferCluster" prioritizes Kubernetes cluster configuration, while "preferServer" prioritizes Nacos configuration. 
72 | conflictPolicy: preferCluster 73 | --- 74 | apiVersion: v1 75 | kind: Secret 76 | metadata: 77 | name: nacos-auth 78 | data: 79 | accessKey: 80 | secretKey: 81 | username: 82 | password: 83 | ``` 84 | Run the command to deploy DynamicConfiguration to the namespace of the Kubernetes cluster that requires full synchronization: 85 | ```bash 86 | kubectl apply -f dc-demo.yaml -n 87 | ``` 88 | and the full synchronization of configurations will be achieved. 89 | 90 | ### Partial Synchronization from K8s Namespace to Nacos 91 | Create a DynamicConfiguration YAML file. The main difference from full synchronization lies in the strategy section, and you need to specify the ConfigMap and Secret that require synchronization: 92 | ```yaml 93 | apiVersion: nacos.io/v1 94 | kind: DynamicConfiguration 95 | metadata: 96 | name: dc-demo 97 | spec: 98 | nacosServer: 99 | # endpoint: the address server of nacos server, conflict with serverAddr field, and higher priority than serverAddr field 100 | endpoint: 101 | # serverAddr: the address of nacos server, conflict with endpoint field 102 | serverAddr: 103 | # namespace: Target Nacos namespace 104 | namespace: 105 | # authRef: Reference to the Secret that stores the Nacos client authentication information, supporting both username/password and Access Key/Secret Key. If the Nacos server does not have authentication enabled, this can be ignored. 106 | authRef: 107 | apiVersion: v1 108 | kind: Secret 109 | name: nacos-auth 110 | strategy: 111 | # scope: Synchronization strategy, where "full" indicates full synchronization and "partial" indicates partial synchronization. 112 | scope: partial 113 | # Whether to synchronize configuration deletion operations 114 | syncDeletion: true 115 | # conflictPolicy: Synchronization conflict policy. "preferCluster" prioritizes Kubernetes cluster configuration, while "preferServer" prioritizes Nacos configuration. 
116 | conflictPolicy: preferCluster 117 | # The ConfigMap and Secret that need to be synchronized 118 | objectRefs: 119 | - apiVersion: v1 120 | kind: ConfigMap 121 | name: nacos-config-cm 122 | - apiVersion: v1 123 | kind: Secret 124 | name: nacos-config-secret 125 | --- 126 | apiVersion: v1 127 | kind: Secret 128 | metadata: 129 | name: nacos-auth 130 | data: 131 | accessKey: 132 | secretKey: 133 | username: 134 | password: 135 | ``` 136 | Run the command to deploy DynamicConfiguration to the namespace of the Kubernetes cluster that requires full synchronization: 137 | ```bash 138 | kubectl apply -f dc-demo.yaml -n 139 | ``` 140 | and the partial synchronization of configurations will be achieved. 141 | 142 | ### NacosServer Configuration 143 | - endpoint: the address server of nacos server, conflict with serverAddr field, and higher priority than serverAddr field 144 | - serverAddr: the address of nacos server, conflict with endpoint field 145 | - namespace: the namespace id of nacos server 146 | - group: the group of nacos server 147 | - authRef: a reference of Object, which contains ak/sk of nacos server, currently only Secret is supported 148 | 149 | ```yaml 150 | nacosServer: 151 | endpoint: 152 | serverAddr: 153 | namespace: 154 | group: 155 | authRef: 156 | apiVersion: v1 157 | kind: Secret 158 | name: nacos-auth 159 | ``` 160 | 161 | ## Nacos and Kubernetes Cluster Service Synchronization 162 | Nacos Controller 2.0 supports synchronizing Kubernetes cluster services to Nacos, allowing services under specific namespaces in a Kubernetes cluster to be synced to a designated namespace in Nacos. Users can leverage Nacos to achieve service discovery for Kubernetes services. 
The mapping relationship between Nacos services and Kubernetes services is as follows: 163 | 164 | | Kubernetes Service | Nacos Service | 165 | |------------------|-----------------| 166 | | Namespace | User-specified namespace | 167 | | Name | serviceName | 168 | | Endpoint | instance | 169 | 170 | Currently, two synchronization strategies are primarily supported: 171 | - Full Sync: Automatically synchronizes all Services under a specific namespace in the Kubernetes cluster to Nacos 172 | - Partial Sync: Synchronizes only user-specified Services to Nacos. 173 | ### Full Synchronization of Kubernetes Services to Nacos 174 | Create a ServiceDiscovery YAML file: 175 | ```yaml 176 | apiVersion: nacos.io/v1 177 | kind: ServiceDiscovery 178 | metadata: 179 | name: sd-demo 180 | spec: 181 | nacosServer: 182 | # serverAddr: Nacos server address 183 | serverAddr: 184 | # namespace: User-specified namespace in Nacos 185 | namespace: 186 | # authRef: Secret containing Nacos client authentication credentials (supports username/password or AK/SK; omit if Nacos server authentication is disabled) 187 | authRef: 188 | apiVersion: v1 189 | kind: Secret 190 | name: nacos-auth 191 | --- 192 | apiVersion: v1 193 | kind: Secret 194 | metadata: 195 | name: nacos-auth 196 | data: 197 | accessKey: 198 | secretKey: 199 | username: 200 | password: 201 | ``` 202 | Deploy the ServiceDiscovery to the target Kubernetes namespace: 203 | ```bash 204 | kubectl apply -f sd-demo.yaml -n 205 | ``` 206 | 207 | ### Partial Synchronization of Kubernetes Services to Nacos 208 | Create a ServiceDiscovery YAML file (the only difference from full sync is specifying the Services to sync): 209 | ```yaml 210 | apiVersion: nacos.io/v1 211 | kind: ServiceDiscovery 212 | metadata: 213 | name: sd-demo 214 | spec: 215 | nacosServer: 216 | # serverAddr: Nacos server address 217 | serverAddr: 218 | # namespace: User-specified namespace in Nacos 219 | namespace: 220 | # authRef: Secret containing Nacos client 
authentication credentials (supports username/password or AK/SK; omit if Nacos server authentication is disabled) 221 | authRef: 222 | apiVersion: v1 223 | kind: Secret 224 | name: nacos-auth 225 | # List of Services to sync 226 | services: [svc1,svc2] 227 | --- 228 | apiVersion: v1 229 | kind: Secret 230 | metadata: 231 | name: nacos-auth 232 | data: 233 | accessKey: 234 | secretKey: 235 | username: 236 | password: 237 | ``` 238 | Deploy the ServiceDiscovery to the target Kubernetes namespace: 239 | ```bash 240 | kubectl apply -f sd-demo.yaml -n 241 | ``` 242 | 243 | ## Contributors 244 | Special thanks to the following individuals/teams for their contributions to this project: 245 | 246 | - Alibaba Cloud [EDAS](https://www.aliyun.com/product/edas) team (project incubation source) 247 | - Alibaba Cloud [MSE](https://www.aliyun.com/product/aliware/mse) team -------------------------------------------------------------------------------- /config/crd/bases/nacos.io_dynamicconfigurations.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.16.5 7 | name: dynamicconfigurations.nacos.io 8 | spec: 9 | group: nacos.io 10 | names: 11 | kind: DynamicConfiguration 12 | listKind: DynamicConfigurationList 13 | plural: dynamicconfigurations 14 | shortNames: 15 | - dc 16 | singular: dynamicconfiguration 17 | scope: Namespaced 18 | versions: 19 | - additionalPrinterColumns: 20 | - jsonPath: .status.phase 21 | name: Phase 22 | type: string 23 | - jsonPath: .metadata.creationTimestamp 24 | name: Age 25 | type: date 26 | name: v1 27 | schema: 28 | openAPIV3Schema: 29 | description: DynamicConfiguration is the Schema for the dynamicconfigurations 30 | API 31 | properties: 32 | apiVersion: 33 | description: |- 34 | APIVersion defines the versioned schema of this representation of an object. 
35 | Servers should convert recognized schemas to the latest internal value, and 36 | may reject unrecognized values. 37 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 38 | type: string 39 | kind: 40 | description: |- 41 | Kind is a string value representing the REST resource this object represents. 42 | Servers may infer this from the endpoint the client submits requests to. 43 | Cannot be updated. 44 | In CamelCase. 45 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 46 | type: string 47 | metadata: 48 | type: object 49 | spec: 50 | description: DynamicConfigurationSpec defines the desired state of DynamicConfiguration 51 | properties: 52 | additionalConf: 53 | description: |- 54 | INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 55 | Important: Run "make" to regenerate code after modifying this file 56 | properties: 57 | labels: 58 | additionalProperties: 59 | type: string 60 | type: object 61 | properties: 62 | additionalProperties: 63 | type: string 64 | type: object 65 | tags: 66 | additionalProperties: 67 | type: string 68 | type: object 69 | type: object 70 | nacosServer: 71 | properties: 72 | authRef: 73 | description: ObjectReference contains enough information to let 74 | you inspect or modify the referred object. 75 | properties: 76 | apiVersion: 77 | description: API version of the referent. 78 | type: string 79 | fieldPath: 80 | description: |- 81 | If referring to a piece of an object instead of an entire object, this string 82 | should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
83 | For example, if the object reference is to a container within a pod, this would take on a value like: 84 | "spec.containers{name}" (where "name" refers to the name of the container that triggered 85 | the event) or if no container name is specified "spec.containers[2]" (container with 86 | index 2 in this pod). This syntax is chosen only to have some well-defined way of 87 | referencing a part of an object. 88 | type: string 89 | kind: 90 | description: |- 91 | Kind of the referent. 92 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 93 | type: string 94 | name: 95 | description: |- 96 | Name of the referent. 97 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names 98 | type: string 99 | namespace: 100 | description: |- 101 | Namespace of the referent. 102 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ 103 | type: string 104 | resourceVersion: 105 | description: |- 106 | Specific resourceVersion to which this reference is made, if any. 107 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency 108 | type: string 109 | uid: 110 | description: |- 111 | UID of the referent. 112 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids 113 | type: string 114 | type: object 115 | x-kubernetes-map-type: atomic 116 | endpoint: 117 | type: string 118 | namespace: 119 | type: string 120 | serverAddr: 121 | type: string 122 | type: object 123 | objectRefs: 124 | items: 125 | description: ObjectReference contains enough information to let 126 | you inspect or modify the referred object. 127 | properties: 128 | apiVersion: 129 | description: API version of the referent. 
130 | type: string 131 | fieldPath: 132 | description: |- 133 | If referring to a piece of an object instead of an entire object, this string 134 | should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 135 | For example, if the object reference is to a container within a pod, this would take on a value like: 136 | "spec.containers{name}" (where "name" refers to the name of the container that triggered 137 | the event) or if no container name is specified "spec.containers[2]" (container with 138 | index 2 in this pod). This syntax is chosen only to have some well-defined way of 139 | referencing a part of an object. 140 | type: string 141 | kind: 142 | description: |- 143 | Kind of the referent. 144 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 145 | type: string 146 | name: 147 | description: |- 148 | Name of the referent. 149 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names 150 | type: string 151 | namespace: 152 | description: |- 153 | Namespace of the referent. 154 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ 155 | type: string 156 | resourceVersion: 157 | description: |- 158 | Specific resourceVersion to which this reference is made, if any. 159 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency 160 | type: string 161 | uid: 162 | description: |- 163 | UID of the referent. 
164 | More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids 165 | type: string 166 | type: object 167 | x-kubernetes-map-type: atomic 168 | type: array 169 | strategy: 170 | properties: 171 | conflictPolicy: 172 | type: string 173 | scope: 174 | type: string 175 | syncDeletion: 176 | default: false 177 | type: boolean 178 | type: object 179 | type: object 180 | status: 181 | description: DynamicConfigurationStatus defines the observed state of 182 | DynamicConfiguration 183 | properties: 184 | listenConfigs: 185 | additionalProperties: 186 | items: 187 | type: string 188 | type: array 189 | type: object 190 | message: 191 | type: string 192 | nacosServerStatus: 193 | properties: 194 | endpoint: 195 | type: string 196 | namespace: 197 | type: string 198 | serverAddr: 199 | type: string 200 | type: object 201 | observedGeneration: 202 | format: int64 203 | type: integer 204 | phase: 205 | description: |- 206 | INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 207 | Important: Run "make" to regenerate code after modifying this file 208 | type: string 209 | syncStatuses: 210 | additionalProperties: 211 | items: 212 | properties: 213 | dataId: 214 | type: string 215 | lastSyncFrom: 216 | type: string 217 | lastSyncTime: 218 | format: date-time 219 | type: string 220 | md5: 221 | type: string 222 | message: 223 | type: string 224 | ready: 225 | type: boolean 226 | type: object 227 | type: array 228 | type: object 229 | syncStrategyStatus: 230 | properties: 231 | conflictPolicy: 232 | type: string 233 | scope: 234 | type: string 235 | syncDeletion: 236 | default: false 237 | type: boolean 238 | type: object 239 | type: object 240 | type: object 241 | served: true 242 | storage: true 243 | subresources: 244 | status: {} 245 | -------------------------------------------------------------------------------- /pkg/nacos/naming/naming_client.go: -------------------------------------------------------------------------------- 1 
| package naming 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "os" 7 | "path" 8 | "sigs.k8s.io/controller-runtime/pkg/log" 9 | "strconv" 10 | "strings" 11 | "sync" 12 | 13 | nacosiov1 "github.com/nacos-group/nacos-controller/api/v1" 14 | "github.com/nacos-group/nacos-controller/pkg" 15 | "github.com/nacos-group/nacos-controller/pkg/nacos/auth" 16 | "github.com/nacos-group/nacos-controller/pkg/nacos/client" 17 | "github.com/nacos-group/nacos-sdk-go/v2/clients" 18 | "github.com/nacos-group/nacos-sdk-go/v2/clients/naming_client" 19 | "github.com/nacos-group/nacos-sdk-go/v2/common/constant" 20 | "github.com/nacos-group/nacos-sdk-go/v2/common/logger" 21 | "github.com/nacos-group/nacos-sdk-go/v2/vo" 22 | ) 23 | 24 | type NacosNamingClientBuilder struct { 25 | cache sync.Map 26 | } 27 | 28 | var builder = NacosNamingClientBuilder{ 29 | cache: sync.Map{}, 30 | } 31 | 32 | func GetNacosNamingClientBuilder() *NacosNamingClientBuilder { 33 | return &builder 34 | } 35 | func (m *NacosNamingClientBuilder) BuildNamingClient(authProvider auth.NacosAuthProvider, sd *nacosiov1.ServiceDiscovery) (*NacosNamingClient, error) { 36 | if sd == nil { 37 | return nil, fmt.Errorf("empty ServiceDiscovery") 38 | } 39 | 40 | bytes, _ := json.Marshal(sd) 41 | fmt.Println("BuildNamingClient, sd:", string(bytes)) 42 | bytes, _ = json.Marshal(authProvider) 43 | fmt.Println("BuildNamingClient, auth:", string(bytes)) 44 | 45 | nacosServer := sd.Spec.NacosServer 46 | // 简化判空逻辑,cacheKey仅内部使用 47 | cacheKey := fmt.Sprintf("%s-%s", nacosServer.ServerAddr, nacosServer.Namespace) 48 | cachedClient, ok := m.cache.Load(cacheKey) 49 | if ok && cachedClient != nil { 50 | return cachedClient.(*NacosNamingClient), nil 51 | } 52 | //clientParams, err := authProvider.GetNacosNamingClientParams(sd) 53 | nacosServerParam := client.NacosServerParam{ 54 | Endpoint: nacosServer.Endpoint, 55 | Namespace: nacosServer.Namespace, 56 | ServerAddr: nacosServer.ServerAddr, 57 | } 58 | clientParams, err := 
authProvider.GetNacosClientParams(sd.Spec.NacosServer.AuthRef, nacosServerParam, sd.Namespace) 59 | if err != nil { 60 | return nil, err 61 | } 62 | var sc []constant.ServerConfig 63 | clientOpts := []constant.ClientOption{ 64 | constant.WithAccessKey(clientParams.AuthInfo.AccessKey), 65 | constant.WithSecretKey(clientParams.AuthInfo.SecretKey), 66 | constant.WithPassword(clientParams.AuthInfo.Password), 67 | constant.WithPassword(clientParams.AuthInfo.Username), 68 | constant.WithTimeoutMs(5000), 69 | constant.WithNotLoadCacheAtStart(true), 70 | constant.WithLogDir("/tmp/nacos/log"), 71 | constant.WithCacheDir("/tmp/nacos/cache"), 72 | constant.WithLogLevel("debug"), 73 | constant.WithNamespaceId(clientParams.Namespace), 74 | constant.WithAppName("nacos-controller"), 75 | constant.WithAppConnLabels(map[string]string{"k8s.namespace": sd.Namespace, 76 | "k8s.cluster": pkg.CurrentContext, 77 | "k8s.name": sd.Name}), 78 | } 79 | if len(clientParams.Endpoint) > 0 { 80 | clientOpts = append(clientOpts, constant.WithEndpoint(clientParams.Endpoint)) 81 | } else if len(clientParams.ServerAddr) > 0 { 82 | port := 8848 83 | ip := clientParams.ServerAddr 84 | if strings.Contains(ip, ":") { 85 | split := strings.Split(ip, ":") 86 | ip = split[0] 87 | if v, err := strconv.Atoi(split[1]); err != nil { 88 | return nil, fmt.Errorf("invalid ServerAddr: %s", clientParams.ServerAddr) 89 | } else { 90 | port = v 91 | } 92 | 93 | } 94 | sc = []constant.ServerConfig{ 95 | *constant.NewServerConfig(ip, uint64(port)), 96 | } 97 | } 98 | cc := *constant.NewClientConfig(clientOpts...) 
99 | nClient, err := clients.NewNamingClient( 100 | vo.NacosClientParam{ 101 | ClientConfig: &cc, 102 | ServerConfigs: sc, 103 | }) 104 | if err != nil { 105 | return nil, err 106 | } 107 | 108 | serverAddrs := []string{clientParams.ServerAddr} 109 | nClient1 := &NacosNamingClient{ 110 | client: nClient, 111 | httpSdk: NewNacosHttpSdk(serverAddrs, clientParams.Namespace, clientParams.AuthInfo.AccessKey, clientParams.AuthInfo.SecretKey), 112 | } 113 | 114 | m.cache.Store(cacheKey, nClient1) 115 | return nClient1, nil 116 | } 117 | 118 | func ConvertToNacosClientParam(options NacosOptions) vo.NacosClientParam { 119 | clientConfig := constant.ClientConfig{ 120 | NamespaceId: options.Namespace, 121 | NotLoadCacheAtStart: true, 122 | LogDir: path.Join(os.Getenv("HOME"), "logs", "nacos-go-sdk"), 123 | } 124 | 125 | var serversConfig []constant.ServerConfig 126 | for _, ip := range options.ServersIP { 127 | serversConfig = append(serversConfig, constant.ServerConfig{ 128 | IpAddr: ip, 129 | Port: options.ServerPort, 130 | }) 131 | } 132 | 133 | return vo.NacosClientParam{ 134 | ClientConfig: &clientConfig, 135 | ServerConfigs: serversConfig, 136 | } 137 | } 138 | 139 | type ServiceKey struct { 140 | ServiceName string 141 | 142 | Group string 143 | } 144 | 145 | func (s *ServiceKey) String() string { 146 | return fmt.Sprintf("%s@@%s", s.ServiceName, s.Group) 147 | } 148 | 149 | type ServiceInfo struct { 150 | ServiceKey 151 | 152 | Port uint64 153 | 154 | Metadata map[string]string 155 | } 156 | 157 | type Addresses struct { 158 | IPAddresses []Address 159 | } 160 | type NamingService interface { 161 | RegisterService(ServiceInfo, []Address) 162 | 163 | UnregisterService(ServiceInfo) 164 | 165 | RegisterServiceInstances(serviceInfo ServiceInfo, addresses []Address) 166 | 167 | UnregisterServiceInstances(serviceInfo ServiceInfo, addresses []Address) 168 | 169 | UpdateServiceHealthCheckType(key ServiceKey) bool 170 | } 171 | 172 | type NacosNamingClient struct { 173 | 
client naming_client.INamingClient 174 | servicesMap sync.Map 175 | httpSdk *NacosHttpSdk 176 | } 177 | 178 | func NewNamingClient(options NacosOptions) (*NacosNamingClient, error) { 179 | nacosConfig := ConvertToNacosClientParam(options) 180 | client, err := clients.NewNamingClient(nacosConfig) 181 | if err != nil { 182 | return nil, err 183 | } 184 | 185 | return &NacosNamingClient{ 186 | client: client, 187 | httpSdk: NewNacosHttpSdk(options.ServersIP, options.Namespace, options.AccessKey, options.SecretKey), 188 | }, nil 189 | } 190 | func (c *NacosNamingClient) UpdateServiceHealthCheckType(key ServiceKey) bool { 191 | return c.httpSdk.UpdateServiceHealthCheckTypeToNone(key) 192 | } 193 | func (c *NacosNamingClient) RegisterService(serviceInfo ServiceInfo, addresses []Address) { 194 | if c.UpdateServiceHealthCheckType(serviceInfo.ServiceKey) { 195 | old, exist := c.servicesMap.Load(serviceInfo.ServiceKey) 196 | if !exist { 197 | logger.Infof("Register service (%s@@%s), added %d, deleted %d.", 198 | serviceInfo.ServiceName, serviceInfo.Group, len(addresses), 0) 199 | } 200 | 201 | added, deleted := diffAddresses(old.([]Address), addresses) 202 | logger.Infof("Register service (%s@@%s), added %d, deleted %d.", 203 | serviceInfo.ServiceName, serviceInfo.Group, len(added), len(deleted)) 204 | 205 | c.RegisterServiceInstances(serviceInfo, added) 206 | c.UnregisterServiceInstances(serviceInfo, deleted) 207 | 208 | c.servicesMap.Store(serviceInfo.ServiceKey, addresses) 209 | } else { 210 | logger.Warnf("Register service fail, service (%s@@%s) is not registered.", serviceInfo.ServiceName, serviceInfo.Group) 211 | } 212 | } 213 | 214 | func (c *NacosNamingClient) GetAllInstances(serviceInfo ServiceInfo) ([]Address, error) { 215 | return c.httpSdk.GetAllInstances(vo.SelectAllInstancesParam{ 216 | ServiceName: serviceInfo.ServiceName, 217 | GroupName: serviceInfo.Group, 218 | }) 219 | } 220 | 221 | func (c *NacosNamingClient) UnregisterService(serviceInfo ServiceInfo) 
bool { 222 | logger.Infof("Unregister service (%s@@%s).", serviceInfo.ServiceName, serviceInfo.Group) 223 | old, err := c.httpSdk.GetAllInstances(vo.SelectAllInstancesParam{ 224 | ServiceName: serviceInfo.ServiceName, 225 | GroupName: serviceInfo.Group, 226 | }) 227 | 228 | if err != nil { 229 | logger.Errorf("Select all instances fail, service (%s@@%s).", serviceInfo.ServiceName, serviceInfo.Group) 230 | return false 231 | } 232 | 233 | return c.UnregisterServiceInstances(serviceInfo, old) 234 | } 235 | 236 | func (c *NacosNamingClient) RegisterServiceInstances(serviceInfo ServiceInfo, addresses []Address) bool { 237 | if !c.UpdateServiceHealthCheckType(serviceInfo.ServiceKey) { 238 | log.Log.Info(fmt.Sprintf("Update service health check type fail, service (%s@@%s) is not registered.", serviceInfo.ServiceName, serviceInfo.Group)) 239 | return false 240 | } 241 | 242 | oldAddresses, err := c.GetAllInstances(serviceInfo) 243 | log.Log.Info("RegisterServiceInstances, old: " + strconv.Itoa(len(oldAddresses))) 244 | 245 | if err != nil { 246 | log.Log.Error(err, fmt.Sprintf("Select all instances fail, service (%s@@%s).", serviceInfo.ServiceName, serviceInfo.Group)) 247 | return false 248 | } 249 | 250 | added, deleted := diffAddresses(oldAddresses, addresses) 251 | 252 | if len(added) > 0 || len(deleted) > 0 { 253 | log.Log.Info(fmt.Sprintf("Register service (%s@@%s), added %d, deleted %d.", 254 | serviceInfo.ServiceName, serviceInfo.Group, len(added), len(deleted))) 255 | } 256 | 257 | for _, address := range added { 258 | if _, err := c.client.RegisterInstance(vo.RegisterInstanceParam{ 259 | Ip: address.IP, 260 | Port: address.Port, 261 | Weight: DefaultNacosEndpointWeight, 262 | Enable: true, 263 | Healthy: true, 264 | Metadata: serviceInfo.Metadata, 265 | ServiceName: serviceInfo.ServiceName, 266 | GroupName: serviceInfo.Group, 267 | Ephemeral: false, 268 | }); err != nil { 269 | log.Log.Error(err, fmt.Sprintf("Register instance (%s:%d) with service (%s@@%s) fail", 
270 | address.IP, address.Port, serviceInfo.ServiceName, serviceInfo.Group)) 271 | return false 272 | } 273 | } 274 | 275 | for _, address := range deleted { 276 | if _, err := c.client.DeregisterInstance(vo.DeregisterInstanceParam{ 277 | Ip: address.IP, 278 | Port: address.Port, 279 | ServiceName: serviceInfo.ServiceName, 280 | GroupName: serviceInfo.Group, 281 | Ephemeral: false, 282 | }); err != nil { 283 | log.Log.Error(err, fmt.Sprintf("Unregister instance (%s:%d) with service (%s@@%s) fail", 284 | address.IP, address.Port, serviceInfo.ServiceName, serviceInfo.ServiceKey.Group)) 285 | return false 286 | } 287 | } 288 | 289 | return true 290 | } 291 | 292 | func (c *NacosNamingClient) UnregisterServiceInstances(serviceInfo ServiceInfo, addresses []Address) bool { 293 | for _, address := range addresses { 294 | if _, err := c.client.DeregisterInstance(vo.DeregisterInstanceParam{ 295 | Ip: address.IP, 296 | Port: address.Port, 297 | ServiceName: serviceInfo.ServiceName, 298 | GroupName: serviceInfo.Group, 299 | Ephemeral: false, 300 | }); err != nil { 301 | logger.Errorf("Unregister instance (%s:%d) with service (%s@@%s) fail, err %v.", 302 | address.IP, address.Port, serviceInfo.ServiceName, serviceInfo.Group, err) 303 | return false 304 | } 305 | } 306 | return true 307 | } 308 | 309 | func diffAddresses(old, curr []Address) ([]Address, []Address) { 310 | bs, _ := json.Marshal(old) 311 | bsNew, _ := json.Marshal(curr) 312 | log.Log.Info("diffAddresses, old: " + string(bs) + ", curr: " + string(bsNew)) 313 | 314 | var added, deleted []Address 315 | oldAddressesSet := make(map[Address]struct{}, len(old)) 316 | newAddressesSet := make(map[Address]struct{}, len(curr)) 317 | 318 | for _, s := range old { 319 | oldAddressesSet[s] = struct{}{} 320 | } 321 | for _, s := range curr { 322 | newAddressesSet[s] = struct{}{} 323 | } 324 | 325 | for oldAddress := range oldAddressesSet { 326 | if _, exist := newAddressesSet[oldAddress]; !exist { 327 | deleted = 
append(deleted, oldAddress) 328 | } 329 | } 330 | 331 | for newAddress := range newAddressesSet { 332 | if _, exist := oldAddressesSet[newAddress]; !exist { 333 | added = append(added, newAddress) 334 | } 335 | } 336 | 337 | return added, deleted 338 | } 339 | --------------------------------------------------------------------------------