├── config ├── manager │ ├── aws.properties │ ├── controller_manager_config.yaml │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── certmanager │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── certificate.yaml ├── webhook │ ├── kustomization.yaml │ ├── service.yaml │ └── kustomizeconfig.yaml ├── rbac │ ├── service_account.yaml │ ├── auth_proxy_client_clusterrole.yaml │ ├── role_binding.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── leader_election_role_binding.yaml │ ├── auth_proxy_role.yaml │ ├── serviceexport_viewer_role.yaml │ ├── serviceimport_viewer_role.yaml │ ├── serviceexport_editor_role.yaml │ ├── serviceimport_editor_role.yaml │ ├── leader_election_role.yaml │ ├── kustomization.yaml │ └── role.yaml ├── controller_install_latest │ └── kustomization.yaml ├── controller_install_release │ └── kustomization.yaml ├── crd │ ├── patches │ │ ├── cainjection_in_clusterproperties.yaml │ │ ├── cainjection_in_serviceexports.yaml │ │ ├── cainjection_in_serviceimports.yaml │ │ ├── annotation_for_clusterproperties.yaml │ │ ├── webhook_in_serviceexports.yaml │ │ ├── webhook_in_serviceimports.yaml │ │ └── webhook_in_clusterproperties.yaml │ ├── kustomizeconfig.yaml │ ├── kustomization.yaml │ └── bases │ │ ├── about.k8s.io_clusterproperties.yaml │ │ ├── multicluster.x-k8s.io_serviceexports.yaml │ │ └── multicluster.x-k8s.io_serviceimports.yaml └── default │ ├── manager_config_patch.yaml │ ├── manager_webhook_patch.yaml │ ├── webhookcainjection_patch.yaml │ ├── manager_auth_proxy_patch.yaml │ └── kustomization.yaml ├── NOTICE ├── docs ├── architecture-overview.png └── images │ ├── solution-baseline.png │ ├── solution-overview.png │ ├── service-consumption.png │ ├── service-provisioning.png │ └── cloudmap.svg ├── samples ├── nginx-serviceexport.yaml ├── example-serviceexport.yaml ├── nginx-service.yaml ├── example-service.yaml ├── example-headless.yaml ├── client-hello.yaml ├── mcsapi-clusterproperty.yaml ├── eksctl-cluster.yaml ├── nginx-deployment.yaml ├── example-deployment.yaml ├── example-clusterproperty.yaml ├── coredns-configmap.yaml ├── coredns-clusterrole.yaml └── coredns-deployment.yaml ├── integration ├── kind-test │ ├── configs │ │ ├── ipv6.yaml │ │ ├── e2e-clusterproperty.yaml │ │ ├── dnsutils-pod.yaml │ │ ├── e2e-clusterset-ip-service.yaml │ │ ├── e2e-headless-service.yaml │ │ ├── e2e-deployment.yaml │ │ └── coredns-deployment.yaml │ └── scripts │ │ ├── cleanup-kind.sh │ │ ├── ensure-jq.sh │ │ ├── curl-test.sh │ │ ├── common.sh │ │ ├── run-helper.sh │ │ ├── setup-kind.sh │ │ ├── dns-test.sh │ │ └── run-tests.sh ├── eks-test │ ├── configs │ │ ├── nginx-serviceexport.yaml │ │ ├── nginx-service.yaml │ │ ├── client-hello.yaml │ │ ├── e2e-clusterproperty-1.yaml │ │ ├── e2e-clusterproperty-2.yaml │ │ ├── eksctl-cluster.yaml │ │ ├── nginx-deployment.yaml │ │ └── coredns-deployment.yaml │ └── scripts │ │ ├── eks-setup-helper.sh │ │ ├── eks-common.sh │ │ ├── eks-setup.sh │ │ ├── eks-DNS-test.sh │ │ ├── eks-cleanup.sh │ │ └── eks-run-tests.sh ├── shared │ ├── scripts │ │ ├── cleanup-cloudmap.sh │ │ ├── poll-endpoints.sh │ │ └── test-import.sh │ ├── configs │ │ ├── coredns-configmap.yaml │ │ └── coredns-clusterrole.yaml │ └── scenarios │ │ ├── runner │ │ └── main.go │ │ └── export_service.go └── janitor │ ├── runner │ └── main.go │ ├── aws_facade.go │ ├── api.go │ ├── api_test.go │ ├── janitor_test.go │ └── janitor.go ├── .dockerignore ├── .gitignore ├── mkdocs.yml ├── .github ├── workflows │ ├── 
mkdocs.yml │ ├── build.yml │ ├── integration-test.yml │ ├── deploy.yml │ └── codeql-analysis.yml ├── dependabot.yml └── .codecov.yml ├── hack └── boilerplate.go.txt ├── .golangci.yaml ├── pkg ├── common │ ├── errors.go │ ├── logger.go │ ├── ratelimiter_test.go │ ├── errors_test.go │ └── ratelimiter.go ├── apis │ ├── about │ │ └── v1alpha1 │ │ │ ├── groupversion_info.go │ │ │ ├── clusterproperty_types.go │ │ │ └── zz_generated.deepcopy.go │ └── multicluster │ │ └── v1alpha1 │ │ ├── groupversion_info.go │ │ ├── serviceexport_types.go │ │ └── serviceimport_types.go ├── version │ └── version.go ├── model │ ├── plan.go │ ├── plan_test.go │ ├── cluster.go │ └── cluster_test.go ├── cloudmap │ ├── aws_facade.go │ ├── operation_poller.go │ ├── cache.go │ ├── cache_test.go │ └── operation_poller_test.go └── controllers │ └── multicluster │ ├── controllers_common_test.go │ └── endpointslice_plan.go ├── PROJECT ├── Dockerfile ├── go.mod ├── main.go └── test └── test-constants.go /config/manager/aws.properties: -------------------------------------------------------------------------------- 1 | AWS_REGION 2 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | aws-cloud-map-mcs-controller-for-k8s 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /docs/architecture-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-cloud-map-mcs-controller-for-k8s/HEAD/docs/architecture-overview.png -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /docs/images/solution-baseline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-cloud-map-mcs-controller-for-k8s/HEAD/docs/images/solution-baseline.png -------------------------------------------------------------------------------- /docs/images/solution-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-cloud-map-mcs-controller-for-k8s/HEAD/docs/images/solution-overview.png -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | 
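A quick post-install sanity check (a sketch, not a repository file): the kustomize overlays under config/ deploy this ServiceAccount and the controller into the cloud-map-mcs-system namespace (the MCS_NAMESPACE used by the integration scripts later in this listing), and the EKS setup script below names the service account cloud-map-mcs-controller-manager. Assuming those names apply to your install:

```bash
# Sketch: verify the controller's ServiceAccount and Deployment came up.
# The namespace and resource names are assumptions inferred from the
# integration scripts elsewhere in this repository listing.
kubectl get serviceaccount,deployment -n cloud-map-mcs-system
kubectl logs -n cloud-map-mcs-system deployment/cloud-map-mcs-controller-manager --tail=20
```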
-------------------------------------------------------------------------------- /docs/images/service-consumption.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-cloud-map-mcs-controller-for-k8s/HEAD/docs/images/service-consumption.png -------------------------------------------------------------------------------- /docs/images/service-provisioning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws/aws-cloud-map-mcs-controller-for-k8s/HEAD/docs/images/service-provisioning.png -------------------------------------------------------------------------------- /samples/nginx-serviceexport.yaml: -------------------------------------------------------------------------------- 1 | kind: ServiceExport 2 | apiVersion: multicluster.x-k8s.io/v1alpha1 3 | metadata: 4 | namespace: demo 5 | name: nginx-hello -------------------------------------------------------------------------------- /integration/kind-test/configs/ipv6.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | networking: 4 | ipFamily: ipv6 5 | apiServerAddress: 127.0.0.1 -------------------------------------------------------------------------------- /samples/example-serviceexport.yaml: -------------------------------------------------------------------------------- 1 | kind: ServiceExport 2 | apiVersion: multicluster.x-k8s.io/v1alpha1 3 | metadata: 4 | namespace: example 5 | name: my-service 6 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore all files which are not go type 3 | !**/*.go 4 | !**/*.mod 5 | !**/*.sum 6 | -------------------------------------------------------------------------------- /integration/eks-test/configs/nginx-serviceexport.yaml: -------------------------------------------------------------------------------- 1 | kind: ServiceExport 2 | apiVersion: multicluster.x-k8s.io/v1alpha1 3 | metadata: 4 | namespace: aws-cloud-map-mcs-eks-e2e 5 | name: nginx-hello -------------------------------------------------------------------------------- /config/controller_install_latest/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../default 3 | 4 | images: 5 | - name: controller 6 | newName: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s 7 | newTag: latest 8 | -------------------------------------------------------------------------------- /config/controller_install_release/kustomization.yaml: -------------------------------------------------------------------------------- 1 | bases: 2 | - ../default 3 | 4 | images: 5 | - name: controller 6 | newName: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s 7 | newTag: v0.3.1 8 | -------------------------------------------------------------------------------- /samples/nginx-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: demo 5 | name: nginx-hello 6 | spec: 7 | selector: 8 | app: nginx 9 | ports: 10 | - port: 80 -------------------------------------------------------------------------------- /samples/example-service.yaml: 
-------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: example 5 | name: my-service 6 | spec: 7 | selector: 8 | app: nginx 9 | ports: 10 | - port: 80 11 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: 7 | - "/metrics" 8 | verbs: 9 | - get 10 | -------------------------------------------------------------------------------- /samples/example-headless.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: example 5 | name: my-service 6 | spec: 7 | clusterIP: None 8 | selector: 9 | app: nginx 10 | ports: 11 | - port: 80 12 | -------------------------------------------------------------------------------- /integration/eks-test/configs/nginx-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: aws-cloud-map-mcs-eks-e2e 5 | name: nginx-hello 6 | spec: 7 | selector: 8 | app: nginx 9 | ports: 10 | - port: 80 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | vendor/ 2 | bin/ 3 | testbin/ 4 | testlog/ 5 | cover.out 6 | 7 | # Files generated by JetBrains IDEs, e.g. IntelliJ IDEA 8 | .idea/ 9 | *.iml 10 | 11 | # OSX trash 12 | .DS_Store 13 | 14 | #mocks generated by mockgen 15 | mocks/ 16 | -------------------------------------------------------------------------------- /integration/shared/scripts/cleanup-cloudmap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Deletes all AWS Cloud Map resources used for integration test. 
4 | 5 | set -eo pipefail 6 | 7 | go run ./integration/janitor/runner/main.go "$NAMESPACE" "$CLUSTERID1" "$CLUSTERSETID1" 8 | -------------------------------------------------------------------------------- /samples/client-hello.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: client-hello 5 | namespace: demo 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "1d" 11 | image: alpine 12 | name: client-hello -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 9443 11 | selector: 12 | control-plane: controller-manager 13 | -------------------------------------------------------------------------------- /integration/eks-test/configs/client-hello.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: client-hello 5 | namespace: aws-cloud-map-mcs-eks-e2e 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "1d" 11 | image: alpine 12 | name: client-hello 13 | -------------------------------------------------------------------------------- /integration/kind-test/scripts/cleanup-kind.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Deletes Kind cluster used for integration test. 4 | 5 | set -eo pipefail 6 | source ./integration/kind-test/scripts/common.sh 7 | 8 | $KIND_BIN delete cluster --name "$KIND_SHORT" 9 | 10 | ./integration/shared/scripts/cleanup-cloudmap.sh 11 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: AWS Cloud Map MCS Controller 2 | repo_name: aws/aws-cloud-map-mcs-controller-for-k8s 3 | repo_url: https://github.com/aws/aws-cloud-map-mcs-controller-for-k8s 4 | 5 | plugins: 6 | - search 7 | theme: 8 | name: material 9 | language: en 10 | favicon: images/cloudmap.svg 11 | logo: images/cloudmap.svg 12 | 13 | -------------------------------------------------------------------------------- /samples/mcsapi-clusterproperty.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: about.k8s.io/v1alpha1 2 | kind: ClusterProperty 3 | metadata: 4 | name: cluster.clusterset.k8s.io 5 | spec: 6 | value: ${CLUSTER_ID} 7 | --- 8 | apiVersion: about.k8s.io/v1alpha1 9 | kind: ClusterProperty 10 | metadata: 11 | name: clusterset.k8s.io 12 | spec: 13 | value: ${CLUSTERSET_ID} -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_clusterproperties.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: clusterproperties.about.k8s.io 8 | -------------------------------------------------------------------------------- /config/manager/controller_manager_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 2 | kind: ControllerManagerConfig 3 | health: 4 | healthProbeBindAddress: :8081 5 | metrics: 6 | bindAddress: 127.0.0.1:8080 7 | webhook: 8 | port: 9443 9 | leaderElection: 10 | leaderElect: true 11 | resourceName: aws-cloud-map-mcs-controller-for-k8s-lock 12 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_serviceexports.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: serviceexports.multicluster.x-k8s.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_serviceimports.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: serviceimports.multicluster.x-k8s.io 8 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | 
namespace: system 13 | -------------------------------------------------------------------------------- /integration/eks-test/configs/e2e-clusterproperty-1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: about.k8s.io/v1alpha1 2 | kind: ClusterProperty 3 | metadata: 4 | name: cluster.clusterset.k8s.io 5 | spec: 6 | value: eks-e2e-clusterid-1 7 | --- 8 | apiVersion: about.k8s.io/v1alpha1 9 | kind: ClusterProperty 10 | metadata: 11 | name: clusterset.k8s.io 12 | spec: 13 | value: eks-e2e-clustersetid-1 14 | -------------------------------------------------------------------------------- /integration/eks-test/configs/e2e-clusterproperty-2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: about.k8s.io/v1alpha1 2 | kind: ClusterProperty 3 | metadata: 4 | name: cluster.clusterset.k8s.io 5 | spec: 6 | value: eks-e2e-clusterid-2 7 | --- 8 | apiVersion: about.k8s.io/v1alpha1 9 | kind: ClusterProperty 10 | metadata: 11 | name: clusterset.k8s.io 12 | spec: 13 | value: eks-e2e-clustersetid-1 14 | -------------------------------------------------------------------------------- /integration/kind-test/configs/e2e-clusterproperty.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: about.k8s.io/v1alpha1 2 | kind: ClusterProperty 3 | metadata: 4 | name: cluster.clusterset.k8s.io 5 | spec: 6 | value: kind-e2e-clusterid-1 7 | --- 8 | apiVersion: about.k8s.io/v1alpha1 9 | kind: ClusterProperty 10 | metadata: 11 | name: clusterset.k8s.io 12 | spec: 13 | value: kind-e2e-clustersetid-1 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | 7 | configMapGenerator: 8 | - files: 9 | - controller_manager_config.yaml 10 | name: manager-config 11 | - envs: 12 | - aws.properties 13 | name: aws-config 14 | 15 | images: 16 | - name: controller 17 | newName: controller 18 | newTag: latest 19 | -------------------------------------------------------------------------------- /integration/kind-test/configs/dnsutils-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: dnsutils 5 | namespace: default 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "3600" 11 | image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 12 | name: dnsutils 13 | imagePullPolicy: IfNotPresent 14 | restartPolicy: Always 15 | -------------------------------------------------------------------------------- /config/crd/patches/annotation_for_clusterproperties.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds an annotation to pass protected groups approval required to 
use domain "k8s.io" 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | api-approved.kubernetes.io: "https://github.com/kubernetes/enhancements/pull/3084" 7 | name: clusterproperties.about.k8s.io 8 | -------------------------------------------------------------------------------- /integration/kind-test/scripts/ensure-jq.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Ensure jq is available to parse JSON output. Installs jq on Debian/Ubuntu. 4 | 5 | if ! command -v jq >/dev/null 2>&1 ; then 6 | echo "jq not found, attempting to install" 7 | if ! sudo apt-get install -y jq ; then 8 | echo "failed to install jq, ensure it is available before running tests" 9 | exit 1 10 | fi 11 | fi 12 | 13 | exit 0 14 | -------------------------------------------------------------------------------- /integration/kind-test/configs/e2e-clusterset-ip-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: aws-cloud-map-mcs-e2e 5 | name: e2e-clusterset-ip-service 6 | spec: 7 | selector: 8 | app: nginx-hello 9 | ports: 10 | - port: 80 11 | --- 12 | kind: ServiceExport 13 | apiVersion: multicluster.x-k8s.io/v1alpha1 14 | metadata: 15 | namespace: aws-cloud-map-mcs-e2e 16 | name: e2e-clusterset-ip-service 17 | -------------------------------------------------------------------------------- /integration/kind-test/configs/e2e-headless-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | namespace: aws-cloud-map-mcs-e2e 5 | name: e2e-headless-service 6 | spec: 7 | clusterIP: None 8 | selector: 9 | app: nginx-hello 10 | ports: 11 | - port: 80 12 | --- 13 | kind: ServiceExport 14 | apiVersion: multicluster.x-k8s.io/v1alpha1 15 | metadata: 16 | namespace: aws-cloud-map-mcs-e2e 17 | name: e2e-headless-service 18 | -------------------------------------------------------------------------------- /samples/eksctl-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | metadata: 4 | name: $CLUSTER_NAME 5 | region: $AWS_REGION 6 | version: "1.21" 7 | vpc: 8 | cidr: $VPC_CIDR 9 | autoAllocateIPv6: false 10 | clusterEndpoints: 11 | publicAccess: true 12 | privateAccess: true 13 | managedNodeGroups: 14 | - name: $NODEGROUP_NAME 15 | instanceType: t2.small 16 | minSize: 1 17 | maxSize: 10 18 | desiredCapacity: 1 -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_serviceexports.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: serviceexports.multicluster.x-k8s.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_serviceimports.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: 
CustomResourceDefinition 4 | metadata: 5 | name: serviceimports.multicluster.x-k8s.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /integration/eks-test/configs/eksctl-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | metadata: 4 | name: $CLUSTER_NAME 5 | region: $AWS_REGION 6 | version: "1.22" 7 | vpc: 8 | cidr: $VPC_CIDR 9 | autoAllocateIPv6: false 10 | clusterEndpoints: 11 | publicAccess: true 12 | privateAccess: true 13 | managedNodeGroups: 14 | - name: $NODEGROUP_NAME 15 | instanceType: t3.small 16 | minSize: 1 17 | maxSize: 10 18 | desiredCapacity: 1 19 | -------------------------------------------------------------------------------- /config/rbac/serviceexport_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view serviceexports. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: serviceexport-viewer-role 6 | rules: 7 | - apiGroups: 8 | - multicluster.x-k8s.io 9 | resources: 10 | - serviceexports 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - multicluster.x-k8s.io 17 | resources: 18 | - serviceexports/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/serviceimport_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view serviceimports. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: serviceimport-viewer-role 6 | rules: 7 | - apiGroups: 8 | - multicluster.x-k8s.io 9 | resources: 10 | - serviceimports 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - multicluster.x-k8s.io 17 | resources: 18 | - serviceimports/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /samples/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: demo 5 | name: nginx-demo 6 | labels: 7 | app: nginx 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginxdemos/hello:plain-text 21 | ports: 22 | - containerPort: 80 -------------------------------------------------------------------------------- /samples/example-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: example 5 | name: nginx-deployment 6 | labels: 7 | app: nginx 8 | spec: 9 | replicas: 5 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginx:1.14.2 21 | ports: 22 | - containerPort: 80 23 | -------------------------------------------------------------------------------- /samples/example-clusterproperty.yaml: -------------------------------------------------------------------------------- 1 | # An example object of `cluster.clusterset.k8s.io ClusterProperty` 2 | 3 | apiVersion: about.k8s.io/v1alpha1 4 | kind: ClusterProperty 5 | metadata: 6 | name: cluster.clusterset.k8s.io 7 | spec: 8 | value: sample-mcs-clusterid 9 | --- 10 | # An example object of `clusterset.k8s.io ClusterProperty`: 11 | 12 | apiVersion: about.k8s.io/v1alpha1 13 | kind: ClusterProperty 14 | metadata: 15 | name: clusterset.k8s.io 16 | spec: 17 | value: sample-mcs-clustersetid 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_clusterproperties.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: clusterproperties.about.k8s.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | 
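The webhook_in_*.yaml and cainjection_in_*.yaml patches above only take effect when they are enabled in config/crd/kustomization.yaml, which is not shown in this listing. A hedged spot-check, assuming kustomize is installed and the patches are enabled there, is to render the CRD layer and look for the conversion stanza:

```bash
# Sketch, not a repository script: render the CRD overlay and confirm the
# conversion webhook configuration these patches inject is present.
kustomize build config/crd | grep -A 8 'conversion:'
```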
-------------------------------------------------------------------------------- /.github/workflows/mkdocs.yml: -------------------------------------------------------------------------------- 1 | name: mkdocs 2 | on: 3 | push: 4 | branches: 5 | - main 6 | jobs: 7 | deploy: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout main 11 | uses: actions/checkout@v4 12 | 13 | - name: Setup python 14 | uses: actions/setup-python@v5 15 | with: 16 | python-version: 3.x 17 | 18 | - name: Install mkdocs 19 | run: pip install mkdocs-material 20 | 21 | - name: Publish mkdocs 22 | run: mkdocs gh-deploy --force 23 | -------------------------------------------------------------------------------- /integration/eks-test/configs/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: aws-cloud-map-mcs-eks-e2e 5 | name: nginx-demo 6 | labels: 7 | app: nginx 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: nginx 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx 17 | spec: 18 | containers: 19 | - name: nginx 20 | image: nginxdemos/hello:plain-text 21 | ports: 22 | - containerPort: 80 23 | -------------------------------------------------------------------------------- /config/rbac/serviceexport_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit serviceexports. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: serviceexport-editor-role 6 | rules: 7 | - apiGroups: 8 | - multicluster.x-k8s.io 9 | resources: 10 | - serviceexports 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - multicluster.x-k8s.io 21 | resources: 22 | - serviceexports/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/serviceimport_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit serviceimports. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: serviceimport-editor-role 6 | rules: 7 | - apiGroups: 8 | - multicluster.x-k8s.io 9 | resources: 10 | - serviceimports 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - multicluster.x-k8s.io 21 | resources: 22 | - serviceimports/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /integration/janitor/runner/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/integration/janitor" 9 | ) 10 | 11 | func main() { 12 | if len(os.Args) != 4 { 13 | fmt.Println("Expected namespace name, clusterId, clusterSetId arguments") 14 | os.Exit(1) 15 | } 16 | 17 | nsName := os.Args[1] 18 | clusterId := os.Args[2] 19 | clusterSetId := os.Args[3] 20 | 21 | j := janitor.NewDefaultJanitor(clusterId, clusterSetId) 22 | j.Cleanup(context.TODO(), nsName) 23 | } 24 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "gomod" # See documentation for possible values 9 | directory: "/" # Location of package manifests 10 | schedule: 11 | interval: "monthly" 12 | -------------------------------------------------------------------------------- /integration/kind-test/configs/e2e-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | namespace: aws-cloud-map-mcs-e2e 5 | name: nginx-hello-deployment 6 | labels: 7 | app: nginx-hello 8 | spec: 9 | replicas: 5 10 | selector: 11 | matchLabels: 12 | app: nginx-hello 13 | template: 14 | metadata: 15 | labels: 16 | app: nginx-hello 17 | spec: 18 | containers: 19 | - name: nginx-hello 20 | image: nginxdemos/hello:0.3-plain-text 21 | ports: 22 | - containerPort: 80 23 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | 
control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | scheme: https 15 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | selector: 19 | matchLabels: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: cert 18 | readOnly: true 19 | volumes: 20 | - name: cert 21 | secret: 22 | defaultMode: 420 23 | secretName: webhook-server-cert 24 | -------------------------------------------------------------------------------- /integration/eks-test/scripts/eks-setup-helper.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Adding IAM service accounts 4 | $KUBECTL_BIN config use-context $1 5 | $KUBECTL_BIN create namespace $MCS_NAMESPACE 6 | eksctl create iamserviceaccount \ 7 | --cluster $1 \ 8 | --namespace $MCS_NAMESPACE \ 9 | --name cloud-map-mcs-controller-manager \ 10 | --attach-policy-arn arn:aws:iam::aws:policy/AWSCloudMapFullAccess \ 11 | --override-existing-serviceaccounts \ 12 | --approve 13 | 14 | # Installing controller 15 | $KUBECTL_BIN config use-context $1 16 | $KUBECTL_BIN apply -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_latest" 17 | 18 | -------------------------------------------------------------------------------- 
/samples/coredns-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | Corefile: | 4 | .:53 { 5 | errors 6 | health 7 | multicluster clusterset.local 8 | kubernetes cluster.local in-addr.arpa ip6.arpa { 9 | pods insecure 10 | fallthrough in-addr.arpa ip6.arpa 11 | } 12 | prometheus :9153 13 | forward . /etc/resolv.conf 14 | cache 30 15 | loop 16 | reload 17 | loadbalance 18 | } 19 | kind: ConfigMap 20 | metadata: 21 | annotations: 22 | labels: 23 | eks.amazonaws.com/component: coredns 24 | k8s-app: kube-dns 25 | name: coredns 26 | namespace: kube-system -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /integration/shared/configs/coredns-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | Corefile: | 4 | .:53 { 5 | errors 6 | health 7 | multicluster clusterset.local 8 | kubernetes cluster.local in-addr.arpa ip6.arpa { 9 | pods insecure 10 | fallthrough in-addr.arpa ip6.arpa 11 | } 12 | prometheus :9153 13 | forward . 
/etc/resolv.conf 14 | cache 30 15 | loop 16 | reload 17 | loadbalance 18 | } 19 | kind: ConfigMap 20 | metadata: 21 | annotations: 22 | labels: 23 | eks.amazonaws.com/component: coredns 24 | k8s-app: kube-dns 25 | name: coredns 26 | namespace: kube-system 27 | -------------------------------------------------------------------------------- /.github/.codecov.yml: -------------------------------------------------------------------------------- 1 | # validate 2 | # cat .codecov.yml | curl --data-binary @- https://codecov.io/validate 3 | 4 | codecov: 5 | require_ci_to_pass: yes 6 | 7 | coverage: 8 | precision: 2 9 | round: down 10 | range: "70...100" 11 | status: 12 | project: 13 | default: 14 | if_ci_failed: error #success, failure, error, ignore 15 | informational: true 16 | only_pulls: true 17 | 18 | comment: 19 | layout: "reach,diff,flags,files,footer" 20 | behavior: default 21 | require_changes: no 22 | 23 | ignore: 24 | - "config/**/*" 25 | - "pkg/apis/**/*" 26 | - "mocks/**/*" 27 | - "integration/shared/scenarios/**/*" 28 | - "pkg/common/logger.go" 29 | - "test/*" 30 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | linters-settings: 2 | errcheck: 3 | check-type-assertions: true 4 | goconst: 5 | min-len: 2 6 | min-occurrences: 3 7 | govet: 8 | check-shadowing: true 9 | nolintlint: 10 | require-explanation: true 11 | require-specific: true 12 | 13 | linters: 14 | enable: 15 | - dupl 16 | - goconst 17 | - gocritic 18 | - gofmt 19 | - goimports 20 | - misspell 21 | - whitespace 22 | 23 | issues: 24 | exclude-rules: 25 | - path: _test\.go # disable some linters on test files 26 | linters: 27 | - dupl 28 | 29 | run: 30 | issues-exit-code: 1 31 | concurrency: 4 32 | skip-files: 33 | - .*_mock.go 34 | skip-dirs: 35 | - mocks 36 | - pkg/apis 37 | -------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch adds annotations to the admission webhook configurations; 2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 
3 | apiVersion: admissionregistration.k8s.io/v1beta1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | name: mutating-webhook-configuration 7 | annotations: 8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 9 | --- 10 | apiVersion: admissionregistration.k8s.io/v1beta1 11 | kind: ValidatingWebhookConfiguration 12 | metadata: 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /pkg/common/errors.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "github.com/pkg/errors" 5 | ) 6 | 7 | var notFound = errors.New("resource was not found") 8 | 9 | func IsNotFound(err error) bool { 10 | return errors.Is(err, notFound) 11 | } 12 | 13 | func IsUnknown(err error) bool { 14 | return err != nil && !errors.Is(err, notFound) 15 | } 16 | 17 | func NotFoundError(message string) error { 18 | return errors.Wrap(notFound, message) 19 | } 20 | 21 | func Wrap(err1 error, err2 error) error { 22 | switch { 23 | case err1 != nil && err2 != nil: 24 | return errors.Wrap(err1, err2.Error()) 25 | case err1 != nil: 26 | return err1 27 | case err2 != nil: 28 | return err2 29 | default: 30 | return nil 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | 11 | build: 12 | runs-on: ubuntu-latest 13 | steps: 14 | 15 | - name: Set up Go 16 | uses: actions/setup-go@v5 17 | with: 18 | go-version: 1.19 19 | 20 | - name: checkout 21 | uses: actions/checkout@v4 22 | 23 | - name: Unit tests 24 | run: make test 25 | 26 | - name: Upload code coverage 27 | uses: codecov/codecov-action@v4 28 | with: 29 | files: cover.out 30 | 31 | - name: golangci-lint 32 | uses: golangci/golangci-lint-action@v5 33 | with: 34 | version: v1.50.1 35 | -------------------------------------------------------------------------------- /integration/kind-test/scripts/curl-test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Testing service consumption with dnsutils pod 4 | 5 | deployment=$1 6 | 7 | echo "performing curl to $SERVICE.$NAMESPACE.svc.clusterset.local" 8 | http_code=$($KUBECTL_BIN exec deployment/$deployment --namespace "$NAMESPACE" -- curl -s -o /dev/null -w "%{http_code}" $SERVICE.$NAMESPACE.svc.clusterset.local) 9 | exit_code=$? 
10 | 11 | if [ "$exit_code" -ne 0 ]; then 12 | echo "ERROR: Unable to curl $SERVICE.$NAMESPACE.svc.clusterset.local" 13 | exit $exit_code 14 | fi 15 | 16 | if [ "$http_code" -ne "200" ]; then 17 | echo "ERROR: curl $SERVICE.$NAMESPACE.svc.clusterset.local failed with $http_code" 18 | exit 1 19 | fi 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: x-k8s.io 2 | layout: 3 | - go.kubebuilder.io/v3 4 | projectName: aws-cloud-map-mcs-controller-for-k8s 5 | repo: github.com/aws/aws-cloud-map-mcs-controller-for-k8s 6 | multigroup: true 7 | resources: 8 | - api: 9 | crdVersion: v1 10 | namespaced: true 11 | controller: true 12 | domain: x-k8s.io 13 | group: multicluster 14 | kind: ServiceExport 15 | path: github.com/aws/aws-cloud-map-mcs-controller-for-k8s/apis/multicluster/v1alpha1 16 | version: v1alpha1 17 | - api: 18 | crdVersion: v1 19 | namespaced: true 20 | controller: true 21 | domain: x-k8s.io 22 | group: multicluster 23 | kind: ServiceImport 24 | path: github.com/aws/aws-cloud-map-mcs-controller-for-k8s/apis/multicluster/v1alpha1 25 | version: v1alpha1 26 | version: "3" 27 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /pkg/apis/about/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | // Package v1alpha1 contains API Schema definitions for the about v1alpha1 API group 2 | // +kubebuilder:object:generate=true 3 | // +groupName=about.k8s.io 4 | package v1alpha1 5 | 6 | import ( 7 | "k8s.io/apimachinery/pkg/runtime/schema" 8 | "sigs.k8s.io/controller-runtime/pkg/scheme" 9 | ) 10 | 11 | var ( 12 | // GroupVersion is group version used to register these objects 13 | GroupVersion = schema.GroupVersion{Group: "about.k8s.io", Version: "v1alpha1"} 14 | 15 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 16 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 17 | 18 | // AddToScheme adds the types in this group-version to the given scheme. 
19 | AddToScheme = SchemeBuilder.AddToScheme 20 | ) 21 | -------------------------------------------------------------------------------- /pkg/apis/multicluster/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | // Package v1alpha1 contains API Schema definitions for the multicluster v1alpha1 API group 2 | // +kubebuilder:object:generate=true 3 | // +groupName=multicluster.x-k8s.io 4 | package v1alpha1 5 | 6 | import ( 7 | "k8s.io/apimachinery/pkg/runtime/schema" 8 | "sigs.k8s.io/controller-runtime/pkg/scheme" 9 | ) 10 | 11 | var ( 12 | // GroupVersion is group version used to register these objects 13 | GroupVersion = schema.GroupVersion{Group: "multicluster.x-k8s.io", Version: "v1alpha1"} 14 | 15 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 16 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 17 | 18 | // AddToScheme adds the types in this group-version to the given scheme. 19 | AddToScheme = SchemeBuilder.AddToScheme 20 | ) 21 | -------------------------------------------------------------------------------- /integration/eks-test/scripts/eks-common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export KIND_BIN='./bin/kind' 4 | export KUBECTL_BIN='kubectl' 5 | export LOGS='./integration/eks-test/testlog' 6 | export CONFIGS='./integration/eks-test/configs' 7 | export SCENARIOS='./integration/shared/scenarios' 8 | export NAMESPACE='aws-cloud-map-mcs-eks-e2e' 9 | export MCS_NAMESPACE='cloud-map-mcs-system' 10 | export SERVICE='nginx-hello' 11 | export SERVICE_TYPE='ClusterSetIP' 12 | export CLIENT_POD='client-hello' 13 | export ENDPT_PORT=80 14 | export SERVICE_PORT=80 # from nginx-service.yaml 15 | export EXPORT_CLS='cls1' 16 | export IMPORT_CLS='cls2' 17 | export CLUSTERID1='eks-e2e-clusterid-1' 18 | export CLUSTERID2='eks-e2e-clusterid-2' 19 | export CLUSTERSETID1='eks-e2e-clustersetid-1' 20 | export EXPECTED_ENDPOINT_COUNT=3 21 | export UPDATED_ENDPOINT_COUNT=4 -------------------------------------------------------------------------------- /integration/shared/scripts/poll-endpoints.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Poll for endpoints to become active 4 | 5 | set -e 6 | 7 | endpt_count=0 8 | poll_count=0 9 | while ((endpt_count < $1)) 10 | do 11 | if ((poll_count++ > 30)) ; then 12 | echo "timed out polling for endpoints" >&2 13 | exit 1 14 | fi 15 | 16 | sleep 2 17 | if ! 
addresses=$($KUBECTL_BIN get endpointslices -o json --namespace "$NAMESPACE" | \ 18 | jq --arg SERVICE "$SERVICE" '.items[] | select(.metadata.ownerReferences[].name==$SERVICE) | .endpoints[].addresses[0]' 2> /dev/null) 19 | then 20 | # no endpoints ready 21 | continue 22 | fi 23 | 24 | endpt_count=$(echo "$addresses" | wc -l | xargs) 25 | done 26 | 27 | echo "$addresses" | tr -d '"' | paste -sd "," - 28 | echo "matched number of endpoints to expected count" >&2 29 | exit 0 30 | -------------------------------------------------------------------------------- /integration/kind-test/scripts/common.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export KIND_BIN='./bin/kind' 4 | export KUBECTL_BIN='kubectl' 5 | export LOGS='./integration/kind-test/testlog' 6 | export KIND_CONFIGS='./integration/kind-test/configs' 7 | export SHARED_CONFIGS='./integration/shared/configs' 8 | export SCENARIOS='./integration/shared/scenarios' 9 | export NAMESPACE='aws-cloud-map-mcs-e2e' 10 | export ENDPT_PORT=80 11 | export SERVICE_PORT=80 12 | export CLUSTERIP_SERVICE='e2e-clusterset-ip-service' 13 | export HEADLESS_SERVICE='e2e-headless-service' 14 | export KIND_SHORT='cloud-map-e2e' 15 | export CLUSTER='kind-cloud-map-e2e' 16 | export CLUSTERID1='kind-e2e-clusterid-1' 17 | export CLUSTERSETID1='kind-e2e-clustersetid-1' 18 | export IMAGE='kindest/node:v1.21.12@sha256:f316b33dd88f8196379f38feb80545ef3ed44d9197dca1bfd48bcb1583210207' 19 | export EXPECTED_ENDPOINT_COUNT=5 20 | export UPDATED_ENDPOINT_COUNT=6 21 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting vars. 2 | # It requires kustomize v2.1.0 or newer to work properly. 3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | 24 | varReference: 25 | - path: metadata/annotations 26 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | // Build information obtained with the help of -ldflags 9 | var ( 10 | GitVersion string 11 | GitCommit string 12 | PackageName = "aws-cloud-map-mcs-controller-for-k8s" 13 | ) 14 | 15 | // GetVersion figures out the version information 16 | // based on variables set by -ldflags. 
17 | func GetVersion() string {
18 | // only set the version if -ldflags was used
19 | if GitCommit != "" {
20 | return fmt.Sprintf("%s (%s)", strings.TrimPrefix(GitVersion, "v"), GitCommit)
21 | }
22 | 
23 | return ""
24 | }
25 | 
26 | func GetPackageVersion() string {
27 | return PackageName + " " + GetVersion()
28 | }
29 | 
30 | func GetUserAgentKey() string {
31 | return PackageName
32 | }
33 | 
34 | func GetUserAgentValue() string {
35 | if GitVersion != "" {
36 | return strings.TrimPrefix(GitVersion, "v")
37 | }
38 | return ""
39 | }
40 | 
--------------------------------------------------------------------------------
/config/default/manager_auth_proxy_patch.yaml:
--------------------------------------------------------------------------------
1 | # This patch injects a sidecar container that is an HTTP proxy for the
2 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | name: controller-manager
7 | namespace: system
8 | spec:
9 | template:
10 | spec:
11 | containers:
12 | - name: kube-rbac-proxy
13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0
14 | args:
15 | - "--secure-listen-address=0.0.0.0:8443"
16 | - "--upstream=http://127.0.0.1:8080/"
17 | - "--logtostderr=true"
18 | - "--v=10"
19 | ports:
20 | - containerPort: 8443
21 | name: https
22 | - name: manager
23 | args:
24 | - "--health-probe-bind-address=:8081"
25 | - "--metrics-bind-address=127.0.0.1:8080"
26 | - "--leader-elect"
27 | 
--------------------------------------------------------------------------------
/integration/kind-test/scripts/run-helper.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Helper to run the tests, passing in different service names
4 | 
5 | source ./integration/kind-test/scripts/common.sh
6 | 
7 | # create test namespace
8 | $KUBECTL_BIN create namespace "$NAMESPACE"
9 | 
10 | # If the IP Type env var is not set, default it to IPv4
11 | if [[ -z "${ADDRESS_TYPE}" ]]; then
12 | ADDRESS_TYPE="IPv4"
13 | fi
14 | 
15 | # ClusterIP service test
16 | ./integration/kind-test/scripts/run-tests.sh "$CLUSTERIP_SERVICE" "ClusterSetIP" $ADDRESS_TYPE
17 | exit_code=$?
18 | if [ "$exit_code" -ne 0 ] ; then
19 | echo "ERROR: Testing $CLUSTERIP_SERVICE failed"
20 | exit $exit_code
21 | fi
22 | 
23 | sleep 5
24 | 
25 | # Headless service test
26 | ./integration/kind-test/scripts/run-tests.sh "$HEADLESS_SERVICE" "Headless" $ADDRESS_TYPE
27 | exit_code=$?
28 | if [ "$exit_code" -ne 0 ] ; then
29 | echo "ERROR: Testing $HEADLESS_SERVICE failed"
30 | exit $exit_code
31 | fi
32 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build the manager binary
2 | FROM golang:1.19 as builder
3 | 
4 | WORKDIR /workspace
5 | 
6 | # Copy the Go Modules manifests, plus the source
7 | COPY . 
./
8 | 
9 | # download dependencies before building so that repeated builds don't need to
10 | # re-download modules that are already in the module cache
11 | RUN go mod download
12 | 
13 | # Build
14 | ENV PKG=github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version
15 | RUN GIT_TAG=$(git describe --tags --dirty --always) && \
16 | GIT_COMMIT=$(git describe --dirty --always) && \
17 | CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build \
18 | -ldflags="-s -w -X ${PKG}.GitVersion=${GIT_TAG} -X ${PKG}.GitCommit=${GIT_COMMIT}" -a -o manager main.go
19 | 
20 | # Use distroless as minimal base image to package the manager binary
21 | # Refer to https://github.com/GoogleContainerTools/distroless for more details
22 | FROM gcr.io/distroless/static:nonroot
23 | WORKDIR /
24 | COPY --from=builder /workspace/manager .
25 | USER 65532:65532
26 | 
27 | ENTRYPOINT ["/manager"]
28 | 
--------------------------------------------------------------------------------
/config/certmanager/certificate.yaml:
--------------------------------------------------------------------------------
1 | # The following manifests contain a self-signed issuer CR and a certificate CR.
2 | # More documentation can be found at https://docs.cert-manager.io
3 | # WARNING: Targets CertManager 0.11; check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for
4 | # breaking changes
5 | apiVersion: cert-manager.io/v1alpha2
6 | kind: Issuer
7 | metadata:
8 | name: selfsigned-issuer
9 | namespace: system
10 | spec:
11 | selfSigned: {}
12 | ---
13 | apiVersion: cert-manager.io/v1alpha2
14 | kind: Certificate
15 | metadata:
16 | name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml
17 | namespace: system
18 | spec:
19 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize
20 | dnsNames:
21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc
22 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local
23 | issuerRef:
24 | kind: Issuer
25 | name: selfsigned-issuer
26 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize
27 | 
--------------------------------------------------------------------------------
/samples/coredns-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | labels:
5 | eks.amazonaws.com/component: coredns
6 | k8s-app: kube-dns
7 | kubernetes.io/bootstrapping: rbac-defaults
8 | name: system:coredns
9 | rules:
10 | - apiGroups:
11 | - ""
12 | resources:
13 | - endpoints
14 | - services
15 | - pods
16 | - namespaces
17 | verbs:
18 | - list
19 | - watch
20 | - apiGroups:
21 | - ""
22 | resources:
23 | - nodes
24 | verbs:
25 | - get
26 | - apiGroups:
27 | - discovery.k8s.io
28 | resources:
29 | - endpointslices
30 | verbs:
31 | - create
32 | - get
33 | - list
34 | - patch
35 | - update
36 | - watch
37 | - apiGroups:
38 | - multicluster.x-k8s.io
39 | resources:
40 | - serviceimports
41 | verbs:
42 | - create
43 | - get
44 | - list
45 | - patch
46 | - update
47 | - watch
48 | - apiGroups:
49 | - multicluster.x-k8s.io
50 | resources:
51 | - serviceexports
52 | verbs:
53 | - create
54 | - get
55 | - list
56 | - patch
57 | - update
58 | - watch
--------------------------------------------------------------------------------
/integration/shared/scripts/test-import.sh:
--------------------------------------------------------------------------------
1 | 
#!/usr/bin/env bash
2 | 
3 | # Test that service imports were created during the e2e test
4 | 
5 | set -e
6 | 
7 | expected_endpoint_count=$1
8 | endpoints=$2
9 | echo "checking service imports..."
10 | 
11 | import_count=0
12 | poll_count=0
13 | while ((import_count < expected_endpoint_count))
14 | do
15 | sleep 1
16 | if ((poll_count++ > 30)) ; then
17 | echo "timed out polling for import endpoints"
18 | exit 1
19 | fi
20 | 
21 | imports=$($KUBECTL_BIN get endpointslices -o json --namespace "$NAMESPACE" | \
22 | jq '.items[] | select(.metadata.ownerReferences[].name | startswith("imported")) | .endpoints[].addresses[0]')
23 | echo "import endpoint list from kubectl:"
24 | echo "$imports"
25 | 
26 | import_count=$(echo "$imports" | wc -l | xargs)
27 | done
28 | 
29 | # NOTE: the loop below runs in a subshell, so it signals a missing endpoint via its exit status
30 | if ! echo "$imports" | tr -d '"' | while read -r import; do
31 | echo "checking import: $import"
32 | if ! echo "$endpoints" | grep -q "$import" ; then
33 | echo "exported endpoint not found: $import"
34 | exit 1
35 | fi
36 | done
37 | then
38 | exit 1
39 | fi
40 | 
41 | echo "matched all imports to exported endpoints"
42 | exit 0
43 | 
--------------------------------------------------------------------------------
/pkg/common/logger.go:
--------------------------------------------------------------------------------
1 | package common
2 | 
3 | import (
4 | "github.com/go-logr/logr"
5 | ctrl "sigs.k8s.io/controller-runtime"
6 | )
7 | 
8 | type Logger interface {
9 | Info(msg string, keysAndValues ...interface{})
10 | Debug(msg string, keysAndValues ...interface{})
11 | Error(err error, msg string, keysAndValues ...interface{})
12 | }
13 | 
14 | type logger struct {
15 | log logr.Logger
16 | }
17 | 
18 | func NewLogger(name string, names ...string) Logger {
19 | l := ctrl.Log.WithName(name)
20 | for _, n := range names {
21 | l = l.WithName(n)
22 | }
23 | return logger{log: l}
24 | }
25 | 
26 | func NewLoggerWithLogr(l logr.Logger) Logger {
27 | return logger{log: l}
28 | }
29 | 
30 | func (l logger) Info(msg string, keysAndValues ...interface{}) {
31 | l.log.V(0).Info(msg, keysAndValues...)
32 | }
33 | 
34 | func (l logger) Debug(msg string, keysAndValues ...interface{}) {
35 | l.log.V(1).Info(msg, keysAndValues...)
36 | }
37 | 
38 | func (l logger) Error(err error, msg string, keysAndValues ...interface{}) {
39 | l.log.Error(err, msg, keysAndValues...)
40 | }
41 | 
42 | func (l logger) WithValues(keysAndValues ...interface{}) Logger {
43 | return logger{log: l.log.WithValues(keysAndValues...)}
44 | }
45 | 
--------------------------------------------------------------------------------
/integration/eks-test/scripts/eks-setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | source ./integration/eks-test/scripts/eks-common.sh
4 | 
5 | # Call helper for service account and controller installation
6 | ./integration/eks-test/scripts/eks-setup-helper.sh $EXPORT_CLS
7 | ./integration/eks-test/scripts/eks-setup-helper.sh $IMPORT_CLS
8 | 
9 | # Apply ClusterProperties
10 | $KUBECTL_BIN config use-context $EXPORT_CLS
11 | $KUBECTL_BIN apply -f "$CONFIGS/e2e-clusterproperty-1.yaml"
12 | 
13 | $KUBECTL_BIN config use-context $IMPORT_CLS
14 | $KUBECTL_BIN apply -f "$CONFIGS/e2e-clusterproperty-2.yaml"
15 | 
16 | # Install the service
17 | $KUBECTL_BIN config use-context $EXPORT_CLS
18 | $KUBECTL_BIN create namespace $NAMESPACE
19 | $KUBECTL_BIN apply -f "$CONFIGS/nginx-deployment.yaml"
20 | $KUBECTL_BIN apply -f "$CONFIGS/nginx-service.yaml"
21 | 
22 | $KUBECTL_BIN config use-context $IMPORT_CLS
23 | $KUBECTL_BIN create namespace $NAMESPACE
24 | 
25 | # Create the service export
26 | $KUBECTL_BIN config use-context $EXPORT_CLS
27 | $KUBECTL_BIN apply -f "$CONFIGS/nginx-serviceexport.yaml"
28 | 
29 | # Create client-hello pod
30 | $KUBECTL_BIN config use-context $IMPORT_CLS
31 | $KUBECTL_BIN apply -f "$CONFIGS/client-hello.yaml"
32 | sleep 15
33 | 
--------------------------------------------------------------------------------
/integration/janitor/aws_facade.go:
--------------------------------------------------------------------------------
1 | package janitor
2 | 
3 | import (
4 | "context"
5 | 
6 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap"
7 | "github.com/aws/aws-sdk-go-v2/aws"
8 | sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery"
9 | )
10 | 
11 | // SdkJanitorFacade extends the client's minimal surface area of ServiceDiscovery API calls
12 | // with the operations needed by the integration test janitor.
13 | type SdkJanitorFacade interface {
14 | // DeleteNamespace provides a ServiceDiscovery DeleteNamespace wrapper interface.
15 | DeleteNamespace(context.Context, *sd.DeleteNamespaceInput, ...func(*sd.Options)) (*sd.DeleteNamespaceOutput, error)
16 | 
17 | // DeleteService provides a ServiceDiscovery DeleteService wrapper interface.
18 | DeleteService(context.Context, *sd.DeleteServiceInput, ...func(*sd.Options)) (*sd.DeleteServiceOutput, error)
19 | 
20 | cloudmap.AwsFacade
21 | }
22 | 
23 | type sdkJanitorFacade struct {
24 | *sd.Client
25 | }
26 | 
27 | // NewSdkJanitorFacadeFromConfig creates a new AWS facade from an AWS client config,
28 | // extended for integration test janitor operations.
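// Usage sketch (illustrative; assumes an aws.Config loaded elsewhere, e.g. via
// config.LoadDefaultConfig):
//
//	facade := NewSdkJanitorFacadeFromConfig(&cfg)
//	_, err := facade.DeleteNamespace(ctx, &sd.DeleteNamespaceInput{Id: &nsId})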
29 | func NewSdkJanitorFacadeFromConfig(cfg *aws.Config) SdkJanitorFacade { 30 | return &sdkJanitorFacade{sd.NewFromConfig(*cfg)} 31 | } 32 | -------------------------------------------------------------------------------- /.github/workflows/integration-test.yml: -------------------------------------------------------------------------------- 1 | name: integration 2 | on: 3 | push: 4 | branches: 5 | - main 6 | concurrency: 7 | group: ${{ github.workflow }}-${{ github.ref }} 8 | cancel-in-progress: true 9 | jobs: 10 | integration-test: 11 | name: Run Integration Test 12 | runs-on: ubuntu-latest 13 | environment: Integration Test 14 | permissions: 15 | id-token: write 16 | steps: 17 | - name: Configure AWS credentials 18 | uses: aws-actions/configure-aws-credentials@v4 19 | with: 20 | aws-region: us-west-2 21 | role-to-assume: ${{ secrets.AWS_ROLE_TO_ASSUME }} 22 | role-session-name: IntegrationTestSession 23 | - name: Checkout code 24 | uses: actions/checkout@v4 25 | - name: Set up Go 26 | uses: actions/setup-go@v5 27 | with: 28 | go-version: 1.19 29 | - name: Set up env 30 | run: source ~/.bashrc 31 | - name: Start clean 32 | run: make kind-integration-cleanup 33 | - name: Set up cluster 34 | run: make kind-integration-setup 35 | - name: Run tests 36 | run: make kind-integration-run 37 | - name: Clean up clusters 38 | run: make kind-integration-cleanup 39 | -------------------------------------------------------------------------------- /integration/shared/configs/coredns-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | eks.amazonaws.com/component: coredns 6 | k8s-app: kube-dns 7 | kubernetes.io/bootstrapping: rbac-defaults 8 | name: system:coredns 9 | rules: 10 | - apiGroups: 11 | - "" 12 | resources: 13 | - endpoints 14 | - services 15 | - pods 16 | - namespaces 17 | verbs: 18 | - list 19 | - watch 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - nodes 24 | verbs: 25 | - get 26 | - apiGroups: 27 | - about.k8s.io 28 | resources: 29 | - clusterproperties 30 | verbs: 31 | - create 32 | - get 33 | - list 34 | - patch 35 | - update 36 | - watch 37 | - apiGroups: 38 | - discovery.k8s.io 39 | resources: 40 | - endpointslices 41 | verbs: 42 | - create 43 | - get 44 | - list 45 | - patch 46 | - update 47 | - watch 48 | - apiGroups: 49 | - multicluster.x-k8s.io 50 | resources: 51 | - serviceimports 52 | verbs: 53 | - create 54 | - get 55 | - list 56 | - patch 57 | - update 58 | - watch 59 | - apiGroups: 60 | - multicluster.x-k8s.io 61 | resources: 62 | - serviceexports 63 | verbs: 64 | - create 65 | - get 66 | - list 67 | - patch 68 | - update 69 | - watch -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: deploy 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | 7 | jobs: 8 | 9 | deploy: 10 | runs-on: ubuntu-latest 11 | env: 12 | REGISTRY: ghcr.io/${{ github.repository }} 13 | permissions: 14 | contents: read 15 | packages: write 16 | steps: 17 | - uses: actions/checkout@v4 18 | 19 | - name: Set up Go 20 | uses: actions/setup-go@v5 21 | with: 22 | go-version: 1.19 23 | 24 | - name: Build image 25 | run: make docker-build 26 | 27 | - name: Login to GitHub Container Registry 28 | uses: docker/login-action@v3 29 | with: 30 | registry: ghcr.io 31 | username: ${{ github.actor }} 32 | password: ${{ 
secrets.GITHUB_TOKEN }}
33 | 
34 | - name: Tag image for github container repository with commit SHA
35 | run: 'docker tag controller:latest $REGISTRY:$GITHUB_SHA'
36 | 
37 | - name: Push image for github container repository with commit SHA
38 | run: 'docker push $REGISTRY:$GITHUB_SHA'
39 | 
40 | - name: Tag image for github container repository with latest
41 | run: 'docker tag controller:latest $REGISTRY:latest'
42 | 
43 | - name: Push image for github container repository with latest
44 | run: 'docker push $REGISTRY:latest'
45 | 
--------------------------------------------------------------------------------
/integration/kind-test/scripts/setup-kind.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Builds the AWS Cloud Map MCS Controller for K8s, provisions a Kubernetes cluster with Kind,
4 | # installs the Cloud Map CRDs and controller into the cluster, and applies export and deployment configs.
5 | 
6 | set -e
7 | 
8 | source ./integration/kind-test/scripts/common.sh
9 | 
10 | ./integration/kind-test/scripts/ensure-jq.sh
11 | 
12 | # If the IP Type env var is not set, default it to IPv4
13 | if [[ -z "${ADDRESS_TYPE}" ]]; then
14 | ADDRESS_TYPE="IPv4"
15 | fi
16 | 
17 | echo "ADDRESS_TYPE: $ADDRESS_TYPE"
18 | if [[ $ADDRESS_TYPE == "IPv4" ]]; then
19 | $KIND_BIN create cluster --name "$KIND_SHORT" --image "$IMAGE"
20 | elif [[ $ADDRESS_TYPE == "IPv6" ]]; then
21 | $KIND_BIN create cluster --name "$KIND_SHORT" --image "$IMAGE" --config=./integration/kind-test/configs/ipv6.yaml
22 | else
23 | echo "ADDRESS_TYPE invalid"
24 | exit 1
25 | fi
26 | 
27 | $KUBECTL_BIN config use-context "$CLUSTER"
28 | make install
29 | 
30 | # Install CoreDNS plugin
31 | $KUBECTL_BIN apply -f "$SHARED_CONFIGS/coredns-clusterrole.yaml"
32 | $KUBECTL_BIN apply -f "$SHARED_CONFIGS/coredns-configmap.yaml"
33 | $KUBECTL_BIN apply -f "$KIND_CONFIGS/coredns-deployment.yaml"
34 | 
35 | # Add ClusterId and ClusterSetId
36 | $KUBECTL_BIN apply -f "$KIND_CONFIGS/e2e-clusterproperty.yaml"
37 | 
--------------------------------------------------------------------------------
/config/rbac/role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | creationTimestamp: null
6 | name: manager-role
7 | rules:
8 | - apiGroups:
9 | - ""
10 | resources:
11 | - namespaces
12 | verbs:
13 | - list
14 | - watch
15 | - apiGroups:
16 | - ""
17 | resources:
18 | - services
19 | verbs:
20 | - create
21 | - delete
22 | - get
23 | - list
24 | - update
25 | - watch
26 | - apiGroups:
27 | - about.k8s.io
28 | resources:
29 | - clusterproperties
30 | verbs:
31 | - create
32 | - delete
33 | - get
34 | - list
35 | - patch
36 | - update
37 | - watch
38 | - apiGroups:
39 | - discovery.k8s.io
40 | resources:
41 | - endpointslices
42 | verbs:
43 | - create
44 | - delete
45 | - deletecollection
46 | - get
47 | - list
48 | - update
49 | - watch
50 | - apiGroups:
51 | - multicluster.x-k8s.io
52 | resources:
53 | - serviceexports
54 | verbs:
55 | - get
56 | - list
57 | - patch
58 | - update
59 | - watch
60 | - apiGroups:
61 | - multicluster.x-k8s.io
62 | resources:
63 | - serviceexports/finalizers
64 | verbs:
65 | - get
66 | - update
67 | - apiGroups:
68 | - multicluster.x-k8s.io
69 | resources:
70 | - serviceimports
71 | verbs:
72 | - create
73 | - delete
74 | - get
75 | - list
76 | - patch
77 | - update
78 | - watch
79 | 
--------------------------------------------------------------------------------
/integration/eks-test/scripts/eks-DNS-test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Test service consumption with the client-hello pod
4 | 
5 | echo "verifying cross-cluster service consumption..."
6 | 
7 | # Install curl if not installed
8 | $KUBECTL_BIN exec $CLIENT_POD -n $NAMESPACE /bin/sh -- curl --version &>/dev/null
9 | exit_code=$?
10 | if [ "$exit_code" -eq 126 ]; then
11 | echo "curl not installed, installing..."
12 | $KUBECTL_BIN exec $CLIENT_POD -n $NAMESPACE /bin/sh -- apk add curl
13 | fi
14 | 
15 | # Perform an nslookup against cluster-local CoreDNS
16 | echo "performing nslookup..."
17 | $KUBECTL_BIN exec -it $CLIENT_POD -n $NAMESPACE /bin/sh -- nslookup $SERVICE.$NAMESPACE.svc.clusterset.local
18 | exit_code=$?
19 | 
20 | if [ "$exit_code" -ne 0 ]; then
21 | echo "ERROR: Unable to nslookup service $SERVICE.$NAMESPACE.svc.clusterset.local"
22 | exit $exit_code
23 | fi
24 | sleep 5
25 | 
26 | # Call the DNS name; if it is unreachable, the importing cluster cannot properly consume the service
27 | echo "performing curl..."
28 | $KUBECTL_BIN exec -it $CLIENT_POD -n $NAMESPACE /bin/sh -- curl $SERVICE.$NAMESPACE.svc.clusterset.local
29 | exit_code=$?
30 | 
31 | if [ "$exit_code" -ne 0 ]; then
32 | echo "ERROR: Unable to reach service $SERVICE.$NAMESPACE.svc.clusterset.local"
33 | exit $exit_code
34 | fi
35 | 
36 | echo "confirmed service consumption"
37 | exit 0
38 | 
--------------------------------------------------------------------------------
/integration/janitor/api.go:
--------------------------------------------------------------------------------
1 | package janitor
2 | 
3 | import (
4 | "context"
5 | 
6 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap"
7 | "github.com/aws/aws-sdk-go-v2/aws"
8 | sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery"
9 | )
10 | 
11 | type ServiceDiscoveryJanitorApi interface {
12 | DeleteNamespace(ctx context.Context, namespaceId string) (operationId string, err error)
13 | DeleteService(ctx context.Context, serviceId string) error
14 | cloudmap.ServiceDiscoveryApi
15 | }
16 | 
17 | type serviceDiscoveryJanitorApi struct {
18 | cloudmap.ServiceDiscoveryApi
19 | janitorFacade SdkJanitorFacade
20 | }
21 | 
22 | func NewServiceDiscoveryJanitorApiFromConfig(cfg *aws.Config) ServiceDiscoveryJanitorApi {
23 | return &serviceDiscoveryJanitorApi{
24 | ServiceDiscoveryApi: cloudmap.NewServiceDiscoveryApiFromConfig(cfg),
25 | janitorFacade: NewSdkJanitorFacadeFromConfig(cfg),
26 | }
27 | }
28 | 
29 | func (api *serviceDiscoveryJanitorApi) DeleteNamespace(ctx context.Context, nsId string) (opId string, err error) {
30 | out, err := api.janitorFacade.DeleteNamespace(ctx, &sd.DeleteNamespaceInput{Id: &nsId})
31 | if err != nil {
32 | return "", err
33 | }
34 | 
35 | return aws.ToString(out.OperationId), nil
36 | }
37 | 
38 | func (api *serviceDiscoveryJanitorApi) DeleteService(ctx context.Context, svcId string) error {
39 | _, err := api.janitorFacade.DeleteService(ctx, &sd.DeleteServiceInput{Id: &svcId})
40 | return err
41 | }
42 | 
--------------------------------------------------------------------------------
/config/crd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | # This kustomization.yaml is not intended to be run by itself,
2 | # since it depends on service name and namespace that are out of this kustomize package.
3 | # It should be run by config/default
4 | resources:
5 | - bases/about.k8s.io_clusterproperties.yaml
6 | - bases/multicluster.x-k8s.io_serviceexports.yaml
7 | - bases/multicluster.x-k8s.io_serviceimports.yaml
8 | #+kubebuilder:scaffold:crdkustomizeresource
9 | 
10 | patchesStrategicMerge:
11 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
12 | # patches here are for enabling the conversion webhook for each CRD
13 | #- patches/webhook_in_clusterproperties.yaml
14 | #- patches/webhook_in_serviceexports.yaml
15 | #- patches/webhook_in_serviceimports.yaml
16 | #+kubebuilder:scaffold:crdkustomizewebhookpatch
17 | 
18 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix.
19 | # patches here are for enabling the CA injection for each CRD
20 | #- patches/cainjection_in_clusterproperties.yaml
21 | #- patches/cainjection_in_serviceexports.yaml
22 | #- patches/cainjection_in_serviceimports.yaml
23 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch
24 | 
25 | # This patch adds an annotation needed to pass the protected-groups approval required to use the domain "k8s.io"
26 | - patches/annotation_for_clusterproperties.yaml
27 | 
28 | # the following config is for teaching kustomize how to do kustomization for CRDs.
29 | configurations:
30 | - kustomizeconfig.yaml
31 | 
--------------------------------------------------------------------------------
/pkg/model/plan.go:
--------------------------------------------------------------------------------
1 | package model
2 | 
3 | type Plan struct {
4 | // List of current instances
5 | Current []*Endpoint
6 | 
7 | // List of desired instances
8 | Desired []*Endpoint
9 | }
10 | 
11 | type Changes struct {
12 | // List of endpoints that need to be created
13 | Create []*Endpoint
14 | // List of endpoints that need to be updated
15 | Update []*Endpoint
16 | // List of endpoints that need to be deleted
17 | Delete []*Endpoint
18 | }
19 | 
20 | // CalculateChanges returns the list of Changes that need to be applied
21 | func (p *Plan) CalculateChanges() Changes {
22 | changes := Changes{}
23 | 
24 | currentMap := make(map[string]*Endpoint)
25 | for _, e := range p.Current {
26 | currentMap[e.Id] = e
27 | }
28 | 
29 | for _, e := range p.Desired {
30 | existing := currentMap[e.Id]
31 | if existing != nil {
32 | if !existing.Equals(e) {
33 | changes.Update = append(changes.Update, e)
34 | }
35 | delete(currentMap, e.Id)
36 | } else {
37 | changes.Create = append(changes.Create, e)
38 | }
39 | }
40 | 
41 | // iterate unmatched endpoints from Current to delete them
42 | for _, e := range currentMap {
43 | changes.Delete = append(changes.Delete, e)
44 | }
45 | 
46 | return changes
47 | }
48 | 
49 | func (c *Changes) HasUpdates() bool {
50 | return len(c.Create) > 0 || len(c.Update) > 0
51 | }
52 | 
53 | func (c *Changes) HasDeletes() bool {
54 | return len(c.Delete) > 0
55 | }
56 | 
57 | func (c *Changes) IsNone() bool {
58 | return len(c.Create) == 0 && len(c.Update) == 0 && len(c.Delete) == 0
59 | }
60 | 
--------------------------------------------------------------------------------
/pkg/common/ratelimiter_test.go:
--------------------------------------------------------------------------------
1 | package common
2 | 
3 | import (
4 | "context"
5 | "testing"
6 | )
7 | 
8 | func TestRateLimiter_Wait(t *testing.T) {
9 | type fields struct {
10 | RateLimiter RateLimiter
11 | }
12 | type args struct {
13 | ctx context.Context
14 | event Event
15 | }
16 | tests := []struct {
17 | name string
18 | fields fields
19 | args args
20 | wantErr 
bool 21 | }{ 22 | { 23 | name: "happy", 24 | fields: fields{RateLimiter: NewDefaultRateLimiter()}, 25 | args: args{ 26 | ctx: context.TODO(), 27 | event: ListServices, 28 | }, 29 | wantErr: false, 30 | }, 31 | { 32 | name: "not_found", 33 | fields: fields{RateLimiter: NewDefaultRateLimiter()}, 34 | args: args{ 35 | ctx: context.TODO(), 36 | event: "test", 37 | }, 38 | wantErr: true, 39 | }, 40 | { 41 | name: "error_ctx_canceled", 42 | fields: fields{RateLimiter: NewDefaultRateLimiter()}, 43 | args: args{ 44 | ctx: ctxCanceled(context.TODO()), 45 | event: ListNamespaces, 46 | }, 47 | wantErr: true, 48 | }, 49 | } 50 | for _, tt := range tests { 51 | t.Run(tt.name, func(t *testing.T) { 52 | r := tt.fields.RateLimiter 53 | if err := r.Wait(tt.args.ctx, tt.args.event); (err != nil) != tt.wantErr { 54 | t.Errorf("Wait() error = %v, wantErr %v", err, tt.wantErr) 55 | } 56 | }) 57 | } 58 | } 59 | 60 | func ctxCanceled(ctx context.Context) context.Context { 61 | ret, cancel := context.WithCancel(ctx) 62 | defer cancel() // cancel after function call 63 | return ret 64 | } 65 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | labels: 23 | control-plane: controller-manager 24 | spec: 25 | securityContext: 26 | runAsNonRoot: true 27 | containers: 28 | - command: 29 | - /manager 30 | args: 31 | - --leader-elect 32 | image: controller:latest 33 | name: manager 34 | securityContext: 35 | allowPrivilegeEscalation: false 36 | livenessProbe: 37 | httpGet: 38 | path: /healthz 39 | port: 8081 40 | initialDelaySeconds: 15 41 | periodSeconds: 20 42 | readinessProbe: 43 | httpGet: 44 | path: /readyz 45 | port: 8081 46 | initialDelaySeconds: 5 47 | periodSeconds: 10 48 | resources: 49 | limits: 50 | cpu: 100m 51 | memory: 30Mi 52 | requests: 53 | cpu: 100m 54 | memory: 20Mi 55 | env: 56 | - name: AWS_REGION 57 | valueFrom: 58 | configMapKeyRef: 59 | name: aws-config 60 | key: AWS_REGION 61 | serviceAccountName: controller-manager 62 | terminationGracePeriodSeconds: 10 63 | -------------------------------------------------------------------------------- /integration/shared/scenarios/runner/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/integration/shared/scenarios" 9 | "github.com/aws/aws-sdk-go-v2/aws" 10 | "github.com/aws/aws-sdk-go-v2/config" 11 | ) 12 | 13 | func main() { 14 | if len(os.Args) != 10 { 15 | fmt.Println("Expected namespace, service, clusterId, clusterSetId, endpoint port, service port, serviceType, endpoint AddressType, and endpoint IP list as arguments") 16 | os.Exit(1) 17 | } 18 | 19 | nsName := os.Args[1] 20 | svcName := os.Args[2] 21 | clusterId := os.Args[3] 22 | clusterSetId := os.Args[4] 23 | port := os.Args[5] 24 | servicePort := os.Args[6] 25 | serviceType := os.Args[7] 26 | addressType := os.Args[8] 27 | ips := os.Args[9] 28 | 29 | testServiceExport(nsName, 
svcName, clusterId, clusterSetId, port, servicePort, serviceType, addressType, ips) 30 | } 31 | 32 | func testServiceExport(nsName string, svcName string, clusterId string, clusterSetId string, port string, servicePort string, serviceType string, addressType string, ips string) { 33 | fmt.Printf("Testing service export integration for namespace %s and service %s\n", nsName, svcName) 34 | 35 | export, err := scenarios.NewExportServiceScenario(getAwsConfig(), nsName, svcName, clusterId, clusterSetId, port, servicePort, serviceType, addressType, ips) 36 | if err != nil { 37 | fmt.Printf("Failed to setup service export integration test scenario: %s", err.Error()) 38 | os.Exit(1) 39 | } 40 | 41 | if err := export.Run(); err != nil { 42 | fmt.Printf("Service export integration test scenario failed: %s", err.Error()) 43 | os.Exit(1) 44 | } 45 | } 46 | 47 | func getAwsConfig() *aws.Config { 48 | awsCfg, err := config.LoadDefaultConfig(context.TODO()) 49 | 50 | if err != nil { 51 | fmt.Printf("unable to configure AWS session: %s", err.Error()) 52 | os.Exit(1) 53 | } 54 | 55 | return &awsCfg 56 | } 57 | -------------------------------------------------------------------------------- /pkg/common/errors_test.go: -------------------------------------------------------------------------------- 1 | package common 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | ) 7 | 8 | func TestIsNotFound(t *testing.T) { 9 | type args struct { 10 | err error 11 | } 12 | tests := []struct { 13 | name string 14 | args args 15 | want bool 16 | }{ 17 | { 18 | name: "trueCase", 19 | args: struct{ err error }{err: NotFoundError("1")}, 20 | want: true, 21 | }, 22 | { 23 | name: "falseCase", 24 | args: struct{ err error }{err: errors.New("test")}, 25 | want: false, 26 | }, 27 | } 28 | for _, tt := range tests { 29 | t.Run(tt.name, func(t *testing.T) { 30 | if got := IsNotFound(tt.args.err); got != tt.want { 31 | t.Errorf("IsNotFound() = %v, want %v", got, tt.want) 32 | } 33 | }) 34 | } 35 | } 36 | 37 | func TestIsUnknown(t *testing.T) { 38 | type args struct { 39 | err error 40 | } 41 | tests := []struct { 42 | name string 43 | args args 44 | want bool 45 | }{ 46 | { 47 | name: "trueCase", 48 | args: struct{ err error }{err: errors.New("test")}, 49 | want: true, 50 | }, 51 | { 52 | name: "falseCase", 53 | args: struct{ err error }{err: NotFoundError("1")}, 54 | want: false, 55 | }, 56 | { 57 | name: "nilCase", 58 | args: struct{ err error }{err: nil}, 59 | want: false, 60 | }, 61 | } 62 | for _, tt := range tests { 63 | t.Run(tt.name, func(t *testing.T) { 64 | if got := IsUnknown(tt.args.err); got != tt.want { 65 | t.Errorf("IsUnknown() = %v, want %v", got, tt.want) 66 | } 67 | }) 68 | } 69 | } 70 | 71 | func TestNotFoundError(t *testing.T) { 72 | tests := []struct { 73 | name string 74 | arg string 75 | }{ 76 | { 77 | name: "happyCase", 78 | arg: "arg", 79 | }, 80 | } 81 | for _, tt := range tests { 82 | t.Run(tt.name, func(t *testing.T) { 83 | if err := NotFoundError(tt.arg); !IsNotFound(err) { 84 | t.Errorf("NotFoundError() error = %v, containsErr = %v", err, notFound) 85 | } 86 | }) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /pkg/apis/about/v1alpha1/clusterproperty_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
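// For example (illustrative only), a new field declared as
//   Foo string `json:"foo,omitempty"`
// would be serialized under the key "foo".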
8 | 
9 | // ClusterPropertySpec defines the desired state of ClusterProperty
10 | type ClusterPropertySpec struct {
11 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
12 | // Important: Run "make" to regenerate code after modifying this file
13 | 
14 | // ClusterProperty value
15 | // +kubebuilder:validation:MaxLength=128000
16 | // +kubebuilder:validation:MinLength=1
17 | Value string `json:"value"`
18 | }
19 | 
20 | // ClusterPropertyStatus defines the observed state of ClusterProperty
21 | type ClusterPropertyStatus struct {
22 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
23 | // Important: Run "make" to regenerate code after modifying this file
24 | }
25 | 
26 | //+kubebuilder:object:root=true
27 | //+kubebuilder:subresource:status
28 | //+kubebuilder:resource:scope=Cluster
29 | 
30 | // ClusterProperty is the Schema for the clusterproperties API
31 | // +kubebuilder:printcolumn:name="value",type=string,JSONPath=`.spec.value`
32 | // +kubebuilder:printcolumn:name="age",type=date,JSONPath=`.metadata.creationTimestamp`
33 | type ClusterProperty struct {
34 | metav1.TypeMeta `json:",inline"`
35 | metav1.ObjectMeta `json:"metadata,omitempty"`
36 | 
37 | Spec ClusterPropertySpec `json:"spec,omitempty"`
38 | Status ClusterPropertyStatus `json:"status,omitempty"`
39 | }
40 | 
41 | //+kubebuilder:object:root=true
42 | 
43 | // ClusterPropertyList contains a list of ClusterProperty
44 | type ClusterPropertyList struct {
45 | metav1.TypeMeta `json:",inline"`
46 | metav1.ListMeta `json:"metadata,omitempty"`
47 | Items []ClusterProperty `json:"items"`
48 | }
49 | 
50 | func init() {
51 | SchemeBuilder.Register(&ClusterProperty{}, &ClusterPropertyList{})
52 | }
53 | 
--------------------------------------------------------------------------------
/integration/janitor/api_test.go:
--------------------------------------------------------------------------------
1 | package janitor
2 | 
3 | import (
4 | "context"
5 | "testing"
6 | 
7 | janitorMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/integration/janitor"
8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test"
9 | "github.com/aws/aws-sdk-go-v2/aws"
10 | sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery"
11 | "github.com/golang/mock/gomock"
12 | "github.com/stretchr/testify/assert"
13 | )
14 | 
15 | func TestNewServiceDiscoveryJanitorApiFromConfig(t *testing.T) {
16 | assert.NotNil(t, NewServiceDiscoveryJanitorApiFromConfig(&aws.Config{}))
17 | }
18 | 
19 | func TestServiceDiscoveryJanitorApi_DeleteNamespace_HappyCase(t *testing.T) {
20 | mockController := gomock.NewController(t)
21 | defer mockController.Finish()
22 | 
23 | mocksdk := janitorMock.NewMockSdkJanitorFacade(mockController)
24 | jApi := getJanitorApi(mocksdk)
25 | 
26 | mocksdk.EXPECT().DeleteNamespace(context.TODO(), &sd.DeleteNamespaceInput{Id: aws.String(test.HttpNsId)}).
27 | Return(&sd.DeleteNamespaceOutput{OperationId: aws.String(test.OpId1)}, nil)
28 | 
29 | opId, err := jApi.DeleteNamespace(context.TODO(), test.HttpNsId)
30 | assert.Nil(t, err, "No error for happy case")
31 | assert.Equal(t, test.OpId1, opId)
32 | }
33 | 
34 | func TestServiceDiscoveryJanitorApi_DeleteService_HappyCase(t *testing.T) {
35 | mockController := gomock.NewController(t)
36 | defer mockController.Finish()
37 | 
38 | mocksdk := janitorMock.NewMockSdkJanitorFacade(mockController)
39 | jApi := getJanitorApi(mocksdk)
40 | 
41 | mocksdk.EXPECT().DeleteService(context.TODO(), &sd.DeleteServiceInput{Id: aws.String(test.SvcId)}).
42 | Return(&sd.DeleteServiceOutput{}, nil)
43 | 
44 | err := jApi.DeleteService(context.TODO(), test.SvcId)
45 | assert.Nil(t, err, "No error for happy case")
46 | }
47 | 
48 | func getJanitorApi(sdk *janitorMock.MockSdkJanitorFacade) ServiceDiscoveryJanitorApi {
49 | return &serviceDiscoveryJanitorApi{
50 | janitorFacade: sdk,
51 | }
52 | }
53 | 
--------------------------------------------------------------------------------
/integration/eks-test/scripts/eks-cleanup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Clean up the EKS clusters used for the integration test.
4 | 
5 | source ./integration/eks-test/scripts/eks-common.sh
6 | 
7 | # Delete the service and namespace from the export and import clusters
8 | $KUBECTL_BIN config use-context $EXPORT_CLS
9 | $KUBECTL_BIN delete svc $SERVICE -n $NAMESPACE
10 | 
11 | # Verification to check if there are hanging ServiceExport or ServiceImport CRDs; clears the finalizers to allow the cleanup process to continue
12 | for CRD in $($KUBECTL_BIN get crd -n $NAMESPACE | grep multicluster | cut -d " " -f 1 | xargs); do
13 | $KUBECTL_BIN patch crd -n $NAMESPACE $CRD --type merge -p '{"metadata":{"finalizers": [null]}}';
14 | $KUBECTL_BIN delete crd $CRD -n $NAMESPACE # CRD needs to be explicitly deleted in order to ensure zero resources are hanging for future tests
15 | done
16 | 
17 | $KUBECTL_BIN delete namespaces $NAMESPACE
18 | 
19 | # The IAM service account needs to be explicitly deleted; not doing so leaves hanging service accounts that cause permissions issues in future tests
20 | eksctl delete iamserviceaccount \
21 | --name cloud-map-mcs-controller-manager \
22 | --namespace $MCS_NAMESPACE \
23 | --cluster $EXPORT_CLS \
24 | --wait
25 | 
26 | $KUBECTL_BIN config use-context $IMPORT_CLS
27 | $KUBECTL_BIN delete pod $CLIENT_POD -n $NAMESPACE
28 | $KUBECTL_BIN delete namespaces $NAMESPACE
29 | eksctl delete iamserviceaccount \
30 | --name cloud-map-mcs-controller-manager \
31 | --namespace $MCS_NAMESPACE \
32 | --cluster $IMPORT_CLS \
33 | --wait
34 | 
35 | $KUBECTL_BIN config use-context $EXPORT_CLS
36 | $KUBECTL_BIN delete -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_latest"
37 | $KUBECTL_BIN config use-context $IMPORT_CLS
38 | $KUBECTL_BIN delete -k "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/config/controller_install_latest"
39 | 
40 | echo "EKS clusters cleaned!"
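# Sweep leftover Cloud Map resources for each cluster id; the janitor runner
# takes the Cloud Map namespace, cluster id, and clusterset id as arguments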
41 | 
42 | # ./integration/shared/scripts/cleanup-cloudmap.sh
43 | go run ./integration/janitor/runner/main.go "$NAMESPACE" "$CLUSTERID1" "$CLUSTERSETID1"
44 | go run ./integration/janitor/runner/main.go "$NAMESPACE" "$CLUSTERID2" "$CLUSTERSETID1"
45 | 
--------------------------------------------------------------------------------
/pkg/model/plan_test.go:
--------------------------------------------------------------------------------
1 | package model
2 | 
3 | import (
4 | "reflect"
5 | "testing"
6 | )
7 | 
8 | func TestPlan_CalculateChanges(t *testing.T) {
9 | type fields struct {
10 | Current []*Endpoint
11 | Desired []*Endpoint
12 | }
13 | tests := []struct {
14 | name string
15 | fields fields
16 | want Changes
17 | }{
18 | {
19 | name: "No changes",
20 | fields: fields{
21 | Current: []*Endpoint{{Id: "inst-1"}},
22 | Desired: []*Endpoint{{Id: "inst-1"}},
23 | },
24 | want: Changes{},
25 | },
26 | {
27 | name: "New endpoint added",
28 | fields: fields{
29 | Current: []*Endpoint{{Id: "inst-1"}},
30 | Desired: []*Endpoint{{Id: "inst-1"}, {Id: "inst-2"}},
31 | },
32 | want: Changes{
33 | Create: []*Endpoint{{Id: "inst-2"}},
34 | },
35 | },
36 | {
37 | name: "Endpoint deleted",
38 | fields: fields{
39 | Current: []*Endpoint{{Id: "inst-1"}, {Id: "inst-2"}},
40 | Desired: []*Endpoint{{Id: "inst-1"}},
41 | },
42 | want: Changes{
43 | Delete: []*Endpoint{{Id: "inst-2"}},
44 | },
45 | },
46 | {
47 | name: "Endpoint updated",
48 | fields: fields{
49 | Current: []*Endpoint{{Id: "inst-1", IP: "1.1.1.1"}},
50 | Desired: []*Endpoint{{Id: "inst-1", IP: "1.1.1.2"}},
51 | },
52 | want: Changes{
53 | Update: []*Endpoint{{Id: "inst-1", IP: "1.1.1.2"}},
54 | },
55 | },
56 | {
57 | name: "Endpoint added/deleted/updated at the same time",
58 | fields: fields{
59 | Current: []*Endpoint{{Id: "inst-1", IP: "1.1.1.1"}, {Id: "inst-2", IP: "1.1.1.2"}},
60 | Desired: []*Endpoint{{Id: "inst-3", IP: "1.1.1.3"}, {Id: "inst-2", IP: "2.2.2.2"}},
61 | },
62 | want: Changes{
63 | Delete: []*Endpoint{{Id: "inst-1", IP: "1.1.1.1"}},
64 | Create: []*Endpoint{{Id: "inst-3", IP: "1.1.1.3"}},
65 | Update: []*Endpoint{{Id: "inst-2", IP: "2.2.2.2"}},
66 | },
67 | },
68 | }
69 | for _, tt := range tests {
70 | t.Run(tt.name, func(t *testing.T) {
71 | p := &Plan{
72 | Current: tt.fields.Current,
73 | Desired: tt.fields.Desired,
74 | }
75 | if got := p.CalculateChanges(); !reflect.DeepEqual(got, tt.want) {
76 | t.Errorf("CalculateChanges() = %v, want %v", got, tt.want)
77 | }
78 | })
79 | }
80 | }
81 | 
--------------------------------------------------------------------------------
/pkg/common/ratelimiter.go:
--------------------------------------------------------------------------------
1 | package common
2 | 
3 | import (
4 | "context"
5 | "fmt"
6 | 
7 | "golang.org/x/time/rate"
8 | )
9 | 
10 | const (
11 | ListNamespaces Event = "ListNamespaces"
12 | ListServices Event = "ListServices"
13 | GetOperation Event = "GetOperation"
14 | DiscoverInstances Event = "DiscoverInstances"
15 | CreateHttpNamespace Event = "CreateHttpNamespace"
16 | CreateService Event = "CreateService"
17 | RegisterInstance Event = "RegisterInstance"
18 | DeregisterInstance Event = "DeregisterInstance"
19 | )
20 | 
21 | type Event string
22 | 
23 | type RateLimiter struct {
24 | rateLimiters map[Event]*rate.Limiter
25 | }
26 | 
27 | // NewDefaultRateLimiter returns the rate limiters with the default limits for AWS Cloud Map API calls
28 | func NewDefaultRateLimiter() RateLimiter {
29 | return RateLimiter{rateLimiters: map[Event]*rate.Limiter{
30 | // Below are 
the default limits for the AWS Cloud Map APIs
31 | // TODO: make it customizable in the future
32 | ListNamespaces: rate.NewLimiter(rate.Limit(0.5), 5), // 1 ListNamespaces API call every 2 seconds
33 | ListServices: rate.NewLimiter(rate.Limit(2), 10), // 2 ListServices API calls per second
34 | GetOperation: rate.NewLimiter(rate.Limit(100), 200), // 100 GetOperation API calls per second
35 | DiscoverInstances: rate.NewLimiter(rate.Limit(500), 1000), // 500 DiscoverInstances API calls per second
36 | CreateHttpNamespace: rate.NewLimiter(rate.Limit(0.5), 5), // 1 CreateHttpNamespace API call every 2 seconds
37 | CreateService: rate.NewLimiter(rate.Limit(5), 50), // 5 CreateService API calls per second
38 | RegisterInstance: rate.NewLimiter(rate.Limit(50), 100), // 50 RegisterInstance API calls per second
39 | DeregisterInstance: rate.NewLimiter(rate.Limit(50), 100), // 50 DeregisterInstance API calls per second
40 | }}
41 | }
42 | 
43 | // Wait blocks until the limiter permits an event to happen. It returns an error if the Context is canceled, or the expected wait time exceeds the Context's Deadline.
44 | func (r RateLimiter) Wait(ctx context.Context, event Event) error {
45 | if limiter, ok := r.rateLimiters[event]; ok {
46 | return limiter.Wait(ctx)
47 | }
48 | return fmt.Errorf("event %s not found in the list of limiters", event)
49 | }
50 | 
--------------------------------------------------------------------------------
/config/crd/bases/about.k8s.io_clusterproperties.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apiextensions.k8s.io/v1
3 | kind: CustomResourceDefinition
4 | metadata:
5 | annotations:
6 | controller-gen.kubebuilder.io/version: v0.9.2
7 | creationTimestamp: null
8 | name: clusterproperties.about.k8s.io
9 | spec:
10 | group: about.k8s.io
11 | names:
12 | kind: ClusterProperty
13 | listKind: ClusterPropertyList
14 | plural: clusterproperties
15 | singular: clusterproperty
16 | scope: Cluster
17 | versions:
18 | - additionalPrinterColumns:
19 | - jsonPath: .spec.value
20 | name: value
21 | type: string
22 | - jsonPath: .metadata.creationTimestamp
23 | name: age
24 | type: date
25 | name: v1alpha1
26 | schema:
27 | openAPIV3Schema:
28 | description: ClusterProperty is the Schema for the clusterproperties API
29 | properties:
30 | apiVersion:
31 | description: 'APIVersion defines the versioned schema of this representation
32 | of an object. Servers should convert recognized schemas to the latest
33 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
34 | type: string
35 | kind:
36 | description: 'Kind is a string value representing the REST resource this
37 | object represents. Servers may infer this from the endpoint the client
38 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
39 | type: string
40 | metadata:
41 | type: object
42 | spec:
43 | description: ClusterPropertySpec defines the desired state of ClusterProperty
44 | properties:
45 | value:
46 | description: ClusterProperty value
47 | maxLength: 128000
48 | minLength: 1
49 | type: string
50 | required:
51 | - value
52 | type: object
53 | status:
54 | description: ClusterPropertyStatus defines the observed state of ClusterProperty
55 | type: object
56 | type: object
57 | served: true
58 | storage: true
59 | subresources:
60 | status: {}
61 | 
--------------------------------------------------------------------------------
/pkg/apis/multicluster/v1alpha1/serviceexport_types.go:
--------------------------------------------------------------------------------
1 | package v1alpha1
2 | 
3 | import (
4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
5 | )
6 | 
7 | // +genclient
8 | // +kubebuilder:object:root=true
9 | 
10 | // ServiceExport declares that the Service with the same name and namespace
11 | // as this export should be consumable from other clusters.
12 | type ServiceExport struct {
13 | metav1.TypeMeta `json:",inline"`
14 | // +optional
15 | metav1.ObjectMeta `json:"metadata,omitempty"`
16 | // status describes the current state of an exported service.
17 | // Service configuration comes from the Service that had the same
18 | // name and namespace as this ServiceExport.
19 | // Populated by the multi-cluster service implementation's controller.
20 | // +optional
21 | Status ServiceExportStatus `json:"status,omitempty"`
22 | }
23 | 
24 | // ServiceExportStatus contains the current status of an export.
25 | type ServiceExportStatus struct {
26 | // +optional
27 | // +patchStrategy=merge
28 | // +patchMergeKey=type
29 | // +listType=map
30 | // +listMapKey=type
31 | Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
32 | }
33 | 
34 | // ServiceExportConditionType identifies a specific condition.
35 | type ServiceExportConditionType string
36 | 
37 | const (
38 | // ServiceExportValid means that the service referenced by this
39 | // service export has been recognized as valid by an mcs-controller.
40 | // This will be false if the service is found to be unexportable
41 | // (ExternalName, not found).
42 | ServiceExportValid ServiceExportConditionType = "Valid"
43 | // ServiceExportConflict means that there is a conflict between two
44 | // exports for the same Service. When "True", the condition message
45 | // should contain enough information to diagnose the conflict:
46 | // field(s) under contention, which cluster won, and why.
47 | // Users should not expect detailed per-cluster information in the
48 | // conflict message.
49 | ServiceExportConflict ServiceExportConditionType = "Conflict"
50 | )
51 | 
52 | // +kubebuilder:object:root=true
53 | 
54 | // ServiceExportList represents a list of ServiceExports
55 | type ServiceExportList struct {
56 | metav1.TypeMeta `json:",inline"`
57 | // Standard list metadata.
58 | // +optional
59 | metav1.ListMeta `json:"metadata,omitempty"`
60 | // List of ServiceExports
61 | // +listType=set
62 | Items []ServiceExport `json:"items"`
63 | }
64 | 
65 | func init() {
66 | SchemeBuilder.Register(&ServiceExport{}, &ServiceExportList{})
67 | }
68 | 
--------------------------------------------------------------------------------
/pkg/model/cluster.go:
--------------------------------------------------------------------------------
1 | package model
2 | 
3 | import (
4 | "context"
5 | "fmt"
6 | 
7 | aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1"
8 | 
9 | "sigs.k8s.io/controller-runtime/pkg/client"
10 | )
11 | 
12 | const (
13 | ClusterIdPropertyName = "cluster.clusterset.k8s.io"
14 | ClusterSetIdPropertyName = "clusterset.k8s.io"
15 | )
16 | 
17 | // Non-exported type, accessible via read-only func
18 | type clusterProperties struct {
19 | clusterId string
20 | clusterSetId string
21 | }
22 | 
23 | func (r clusterProperties) ClusterId() string {
24 | return r.clusterId
25 | }
26 | 
27 | func (r clusterProperties) ClusterSetId() string {
28 | return r.clusterSetId
29 | }
30 | 
31 | func (r clusterProperties) IsValid() bool {
32 | return r.clusterSetId != "" && r.clusterId != ""
33 | }
34 | 
35 | func (r clusterProperties) String() string {
36 | return fmt.Sprintf("ClusterId: %s, ClusterSetId: %s", r.clusterId, r.clusterSetId)
37 | }
38 | 
39 | // ClusterUtils provides utility functions for working with clusters
40 | type ClusterUtils struct {
41 | client client.Client
42 | clusterProperties clusterProperties
43 | }
44 | 
45 | func NewClusterUtils(client client.Client) ClusterUtils {
46 | return ClusterUtils{
47 | client: client,
48 | clusterProperties: clusterProperties{},
49 | }
50 | }
51 | 
52 | func NewClusterUtilsWithValues(clusterId string, clusterSetId string) ClusterUtils {
53 | return ClusterUtils{
54 | clusterProperties: clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId},
55 | }
56 | }
57 | 
58 | func (r *ClusterUtils) GetClusterProperties(ctx context.Context) (*clusterProperties, error) {
59 | if !r.clusterProperties.IsValid() {
60 | err := r.LoadClusterProperties(ctx)
61 | if err != nil {
62 | return nil, err
63 | }
64 | }
65 | return &r.clusterProperties, nil
66 | }
67 | 
68 | func (r *ClusterUtils) LoadClusterProperties(ctx context.Context) error {
69 | clusterPropertyList := &aboutv1alpha1.ClusterPropertyList{}
70 | err := r.client.List(ctx, clusterPropertyList)
71 | if err != nil {
72 | return err
73 | }
74 | for _, clusterProperty := range clusterPropertyList.Items {
75 | switch clusterProperty.Name {
76 | case ClusterIdPropertyName:
77 | r.clusterProperties.clusterId = clusterProperty.Spec.Value
78 | case ClusterSetIdPropertyName:
79 | r.clusterProperties.clusterSetId = clusterProperty.Spec.Value
80 | }
81 | }
82 | if !r.clusterProperties.IsValid() {
83 | return fmt.Errorf("ClusterProperty not found: %s", r.clusterProperties)
84 | }
85 | return nil
86 | }
87 | 
--------------------------------------------------------------------------------
/integration/kind-test/scripts/dns-test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # If the IP Type env var is not set, default it to IPv4
4 | if [[ -z "${ADDRESS_TYPE}" ]]; then
5 | ADDRESS_TYPE="IPv4"
6 | fi
7 | 
8 | # Helper function to verify DNS results
9 | checkDNS() {
10 | dns_addresses_count=$(echo "$1" | wc -l | xargs)
11 | 
12 | if [ "$SERVICE_TYPE" = "Headless" ]; then
13 | if [ 
"$dns_addresses_count" -ne "$expected_endpoint_count" ]; then 14 | echo "ERROR: Found $dns_addresses_count endpoints, expected $expected_endpoint_count endpoints" 15 | exit 1 16 | fi 17 | fi 18 | 19 | if [ "$SERVICE_TYPE" = "ClusterSetIP" ]; then 20 | if [ "$dns_addresses_count" -ne 1 ]; then 21 | echo "ERROR: Found $dns_addresses_count endpoints, expected 1 endpoint" 22 | exit 1 23 | fi 24 | fi 25 | } 26 | 27 | # Testing service consumption with dnsutils pod 28 | 29 | echo "verifying dns resolution..." 30 | 31 | expected_endpoint_count=$1 32 | 33 | # Install dnsutils pod 34 | $KUBECTL_BIN apply -f "$KIND_CONFIGS/dnsutils-pod.yaml" 35 | $KUBECTL_BIN wait --for=condition=ready pod/dnsutils # wait until pod is deployed 36 | 37 | # Perform a dig to cluster-local CoreDNS 38 | # TODO: parse dig outputs for more precise verification - check specifics IPs? 39 | if [[ $ADDRESS_TYPE == "IPv4" ]]; then 40 | echo "performing dig for A records for IPv4..." 41 | addresses=$($KUBECTL_BIN exec dnsutils -- dig +all +ans $SERVICE.$NAMESPACE.svc.clusterset.local +short) 42 | exit_code=$? 43 | echo "$addresses" 44 | elif [[ $ADDRESS_TYPE == "IPv6" ]]; then 45 | echo "performing dig for AAAA records for IPv6..." 46 | addresses=$($KUBECTL_BIN exec dnsutils -- dig AAAA +all +ans $SERVICE.$NAMESPACE.svc.clusterset.local +short) 47 | exit_code=$? 48 | echo "$addresses" 49 | else 50 | echo "ADDRESS_TYPE invalid" 51 | exit 1 52 | fi 53 | 54 | if [ "$exit_code" -ne 0 ]; then 55 | echo "ERROR: Unable to dig service $SERVICE.$NAMESPACE.svc.clusterset.local" 56 | exit $exit_code 57 | fi 58 | 59 | # verify DNS results 60 | checkDNS "$addresses" 61 | 62 | echo "performing dig for SRV records..." 63 | addresses=$($KUBECTL_BIN exec dnsutils -- dig +all +ans $SERVICE.$NAMESPACE.svc.clusterset.local. SRV +short) 64 | exit_code=$? 65 | echo "$addresses" 66 | 67 | if [ "$exit_code" -ne 0 ]; then 68 | echo "ERROR: Unable to dig service $SERVICE.$NAMESPACE.svc.clusterset.local" 69 | exit $exit_code 70 | fi 71 | 72 | # verify DNS results 73 | checkDNS "$addresses" 74 | 75 | echo "confirmed dns resolution" 76 | exit 0 77 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | #
12 | name: "CodeQL"
13 | 
14 | on:
15 | push:
16 | branches: [ main ]
17 | pull_request:
18 | # The branches below must be a subset of the branches above
19 | branches: [ main ]
20 | schedule:
21 | - cron: '35 16 * * 5'
22 | 
23 | jobs:
24 | analyze:
25 | name: Analyze
26 | runs-on: ubuntu-latest
27 | permissions:
28 | actions: read
29 | contents: read
30 | security-events: write
31 | 
32 | strategy:
33 | fail-fast: false
34 | matrix:
35 | language: [ 'go' ]
36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
37 | # Learn more:
38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
39 | 
40 | steps:
41 | - name: Checkout repository
42 | uses: actions/checkout@v4
43 | 
44 | # Initializes the CodeQL tools for scanning.
45 | - name: Initialize CodeQL
46 | uses: github/codeql-action/init@v3
47 | with:
48 | languages: ${{ matrix.language }}
49 | # If you wish to specify custom queries, you can do so here or in a config file.
50 | # By default, queries listed here will override any specified in a config file.
51 | # Prefix the list here with "+" to use these queries and those in the config file.
52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main
53 | 
54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
55 | # If this step fails, then you should remove it and run the build manually (see below)
56 | - name: Autobuild
57 | uses: github/codeql-action/autobuild@v3
58 | 
59 | # ℹ️ Command-line programs to run using the OS shell.
60 | # 📚 https://git.io/JvXDl
61 | 
62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
63 | # and modify them (or add more) to build your code if your project
64 | # uses a compiled language
65 | 
66 | #- run: |
67 | # make bootstrap
68 | # make release
69 | 
70 | - name: Perform CodeQL Analysis
71 | uses: github/codeql-action/analyze@v3
72 | 
--------------------------------------------------------------------------------
/integration/eks-test/scripts/eks-run-tests.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | # Runs the AWS Cloud Map MCS Controller for K8s in EKS clusters and tests that services have been exported from one cluster and imported by the other
4 | 
5 | source ./integration/eks-test/scripts/eks-common.sh
6 | 
7 | # Check the expected endpoint count in the exporting cluster
8 | $KUBECTL_BIN config use-context $EXPORT_CLS
9 | if ! endpts=$(./integration/shared/scripts/poll-endpoints.sh "$EXPECTED_ENDPOINT_COUNT"); then
10 | exit $?
11 | fi
12 | 
13 | # Runner to verify the expected endpoints are exported to Cloud Map
14 | go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE "$endpts"
15 | exit_code=$?
16 | 
17 | # Check the imported endpoints in the importing cluster
18 | if [ "$exit_code" -eq 0 ] ; then
19 | $KUBECTL_BIN config use-context $IMPORT_CLS
20 | ./integration/shared/scripts/test-import.sh "$EXPECTED_ENDPOINT_COUNT" "$endpts"
21 | exit_code=$?
22 | fi
23 | 
24 | # Verify that the importing cluster is properly consuming services
25 | if [ "$exit_code" -eq 0 ] ; then
26 | ./integration/eks-test/scripts/eks-DNS-test.sh
27 | exit_code=$?
28 | fi
29 | 
30 | echo "sleeping..."
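# brief pause before scaling (presumably to let the previous checks settle)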
31 | sleep 2 32 | 33 | # Scaling and verifying deployment 34 | if [ "$exit_code" -eq 0 ] ; then 35 | $KUBECTL_BIN config use-context $EXPORT_CLS 36 | deployment=$($KUBECTL_BIN get deployment --namespace "$NAMESPACE" -o json | jq -r '.items[0].metadata.name') 37 | 38 | echo "scaling the deployment $deployment to $UPDATED_ENDPOINT_COUNT" 39 | $KUBECTL_BIN scale deployment/"$deployment" --replicas="$UPDATED_ENDPOINT_COUNT" --namespace "$NAMESPACE" 40 | exit_code=$? 41 | fi 42 | 43 | if [ "$exit_code" -eq 0 ] ; then 44 | if ! updated_endpoints=$(./integration/shared/scripts/poll-endpoints.sh "$UPDATED_ENDPOINT_COUNT") ; then 45 | exit 1 # poll failed; "$?" would read the negated test result (0) here 46 | fi 47 | fi 48 | 49 | if [ "$exit_code" -eq 0 ] ; then 50 | go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE "$updated_endpoints" 51 | exit_code=$? 52 | fi 53 | 54 | if [ "$exit_code" -eq 0 ] ; then 55 | $KUBECTL_BIN config use-context $IMPORT_CLS 56 | ./integration/shared/scripts/test-import.sh "$UPDATED_ENDPOINT_COUNT" "$updated_endpoints" 57 | exit_code=$? 58 | fi 59 | 60 | if [ "$exit_code" -eq 0 ] ; then 61 | ./integration/eks-test/scripts/eks-DNS-test.sh 62 | exit_code=$? 63 | fi 64 | 65 | 66 | # Dump logs 67 | mkdir -p "$LOGS" 68 | $KUBECTL_BIN config use-context $EXPORT_CLS 69 | $KUBECTL_BIN logs -l control-plane=controller-manager -c manager --namespace $MCS_NAMESPACE &> "$LOGS/ctl-1.log" 70 | $KUBECTL_BIN config use-context $IMPORT_CLS 71 | $KUBECTL_BIN logs -l control-plane=controller-manager -c manager --namespace $MCS_NAMESPACE &> "$LOGS/ctl-2.log" 72 | echo "dumped logs" 73 | 74 | exit $exit_code 75 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: cloud-map-mcs-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: cloud-map-mcs- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 31 | - manager_auth_proxy_patch.yaml 32 | 33 | # Mount the controller config file for loading manager configurations 34 | # through a ComponentConfig type 35 | #- manager_config_patch.yaml 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
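# (Concretely, "uncomment all sections" means flipping the commented entries in
# this file, e.g. '#- ../certmanager' in the bases list above becomes
# '- ../certmanager', and the webhookcainjection patch below is uncommented the
# same way.)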
42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # the following config is for teaching kustomize how to do var substitution 47 | vars: 48 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 49 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 50 | # objref: 51 | # kind: Certificate 52 | # group: cert-manager.io 53 | # version: v1 54 | # name: serving-cert # this name should match the one in certificate.yaml 55 | # fieldref: 56 | # fieldpath: metadata.namespace 57 | #- name: CERTIFICATE_NAME 58 | # objref: 59 | # kind: Certificate 60 | # group: cert-manager.io 61 | # version: v1 62 | # name: serving-cert # this name should match the one in certificate.yaml 63 | #- name: SERVICE_NAMESPACE # namespace of the service 64 | # objref: 65 | # kind: Service 66 | # version: v1 67 | # name: webhook-service 68 | # fieldref: 69 | # fieldpath: metadata.namespace 70 | #- name: SERVICE_NAME 71 | # objref: 72 | # kind: Service 73 | # version: v1 74 | # name: webhook-service 75 | -------------------------------------------------------------------------------- /integration/kind-test/scripts/run-tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Runs the AWS Cloud Map MCS Controller for K8s as a background process and tests that services have been exported 4 | 5 | source ./integration/kind-test/scripts/common.sh 6 | export SERVICE=$1 7 | export SERVICE_TYPE=$2 8 | export IP_TYPE=$3 9 | 10 | # Deploy pods 11 | $KUBECTL_BIN apply -f "$KIND_CONFIGS/e2e-deployment.yaml" 12 | # Get deployment 13 | deployment=$($KUBECTL_BIN get deployment --namespace "$NAMESPACE" -o json | jq -r '.items[0].metadata.name') 14 | 15 | printf "\n***Testing Service: $SERVICE***\n" 16 | 17 | $KUBECTL_BIN apply -f "$KIND_CONFIGS/$SERVICE.yaml" 18 | 19 | if ! endpts=$(./integration/shared/scripts/poll-endpoints.sh "$EXPECTED_ENDPOINT_COUNT") ; then 20 | exit 1 # poll failed; "$?" would read the negated test result (0) here 21 | fi 22 | 23 | mkdir -p "$LOGS" 24 | ./bin/manager --zap-devel=true --zap-time-encoding=rfc3339 &> "$LOGS/ctl.log" & 25 | CTL_PID=$! 26 | echo "controller PID:$CTL_PID" 27 | 28 | go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE $IP_TYPE "$endpts" 29 | exit_code=$? 30 | 31 | if [ "$exit_code" -eq 0 ] ; then 32 | ./integration/shared/scripts/test-import.sh "$EXPECTED_ENDPOINT_COUNT" "$endpts" 33 | exit_code=$? 34 | fi 35 | 36 | if [ "$exit_code" -eq 0 ] ; then 37 | ./integration/kind-test/scripts/dns-test.sh "$EXPECTED_ENDPOINT_COUNT" 38 | exit_code=$? 39 | fi 40 | 41 | if [ "$exit_code" -eq 0 ] ; then 42 | ./integration/kind-test/scripts/curl-test.sh "$deployment" 43 | exit_code=$? 44 | fi 45 | 46 | echo "sleeping..." 47 | sleep 2 48 | 49 | if [ "$exit_code" -eq 0 ] ; then 50 | echo "scaling the deployment $deployment to $UPDATED_ENDPOINT_COUNT" 51 | $KUBECTL_BIN scale deployment/"$deployment" --replicas="$UPDATED_ENDPOINT_COUNT" --namespace "$NAMESPACE" 52 | exit_code=$? 53 | fi 54 | 55 | if [ "$exit_code" -eq 0 ] ; then 56 | if ! updated_endpoints=$(./integration/shared/scripts/poll-endpoints.sh "$UPDATED_ENDPOINT_COUNT") ; then 57 | exit 1 # poll failed; "$?" would read the negated test result (0) here
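# (poll-endpoints.sh is expected to block until the EndpointSlice for the test
# service reaches the requested endpoint count and to echo the discovered IPs
# on success, which is why its output is captured and replayed to the
# verification steps.)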
58 | fi 59 | fi 60 | 61 | if [ "$exit_code" -eq 0 ] ; then 62 | go run $SCENARIOS/runner/main.go $NAMESPACE $SERVICE $CLUSTERID1 $CLUSTERSETID1 $ENDPT_PORT $SERVICE_PORT $SERVICE_TYPE $IP_TYPE "$updated_endpoints" 63 | exit_code=$? 64 | fi 65 | 66 | if [ "$exit_code" -eq 0 ] ; then 67 | ./integration/shared/scripts/test-import.sh "$UPDATED_ENDPOINT_COUNT" "$updated_endpoints" 68 | exit_code=$? 69 | fi 70 | 71 | if [ "$exit_code" -eq 0 ] ; then 72 | ./integration/kind-test/scripts/dns-test.sh "$UPDATED_ENDPOINT_COUNT" 73 | exit_code=$? 74 | fi 75 | 76 | if [ "$exit_code" -eq 0 ] ; then echo "Test Successful. Cleaning up..." ; fi 77 | 78 | # Remove the deployment and delete service (should also delete ServiceExport) 79 | if [ "$exit_code" -eq 0 ] ; then 80 | $KUBECTL_BIN delete -f "$KIND_CONFIGS/e2e-deployment.yaml" 81 | $KUBECTL_BIN delete Service $SERVICE -n $NAMESPACE 82 | # TODO: verify service export is not found 83 | # TODO: verify cloudmap resources are cleaned up 84 | fi 85 | 86 | echo "killing controller PID:$CTL_PID" 87 | kill $CTL_PID 88 | exit $exit_code 89 | -------------------------------------------------------------------------------- /pkg/cloudmap/aws_facade.go: -------------------------------------------------------------------------------- 1 | package cloudmap 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" 7 | "github.com/aws/aws-sdk-go-v2/aws/middleware" 8 | 9 | "github.com/aws/aws-sdk-go-v2/aws" 10 | sd "github.com/aws/aws-sdk-go-v2/service/servicediscovery" 11 | ) 12 | 13 | // AwsFacade wraps the minimal surface area of ServiceDiscovery API calls for the AWS SDK 14 | // required by the AWS Cloud Map client. This enables mock generation for unit testing. 15 | type AwsFacade interface { 16 | // ListNamespaces provides ServiceDiscovery ListNamespaces wrapper interface for paginator. 17 | ListNamespaces(context.Context, *sd.ListNamespacesInput, ...func(*sd.Options)) (*sd.ListNamespacesOutput, error) 18 | 19 | // ListServices provides ServiceDiscovery ListServices wrapper interface for paginator. 20 | ListServices(context.Context, *sd.ListServicesInput, ...func(options *sd.Options)) (*sd.ListServicesOutput, error) 21 | 22 | // ListOperations provides ServiceDiscovery ListOperations wrapper interface for paginator. 23 | ListOperations(context.Context, *sd.ListOperationsInput, ...func(*sd.Options)) (*sd.ListOperationsOutput, error) 24 | 25 | // GetOperation provides ServiceDiscovery GetOperation wrapper interface. 26 | GetOperation(context.Context, *sd.GetOperationInput, ...func(*sd.Options)) (*sd.GetOperationOutput, error) 27 | 28 | // CreateHttpNamespace provides ServiceDiscovery CreateHttpNamespace wrapper interface. 29 | CreateHttpNamespace(context.Context, *sd.CreateHttpNamespaceInput, ...func(*sd.Options)) (*sd.CreateHttpNamespaceOutput, error) 30 | 31 | // CreateService provides ServiceDiscovery CreateService wrapper interface. 32 | CreateService(context.Context, *sd.CreateServiceInput, ...func(*sd.Options)) (*sd.CreateServiceOutput, error) 33 | 34 | // RegisterInstance provides ServiceDiscovery RegisterInstance wrapper interface. 35 | RegisterInstance(context.Context, *sd.RegisterInstanceInput, ...func(*sd.Options)) (*sd.RegisterInstanceOutput, error) 36 | 37 | // DeregisterInstance provides ServiceDiscovery DeregisterInstance wrapper interface.
38 | DeregisterInstance(context.Context, *sd.DeregisterInstanceInput, ...func(*sd.Options)) (*sd.DeregisterInstanceOutput, error) 39 | 40 | // DiscoverInstances provides ServiceDiscovery DiscoverInstances wrapper interface. 41 | DiscoverInstances(context.Context, *sd.DiscoverInstancesInput, ...func(*sd.Options)) (*sd.DiscoverInstancesOutput, error) 42 | } 43 | 44 | type awsFacade struct { 45 | *sd.Client 46 | } 47 | 48 | // NewAwsFacadeFromConfig creates a new AWS facade from an AWS client config. 49 | func NewAwsFacadeFromConfig(cfg *aws.Config) AwsFacade { 50 | sdClient := sd.NewFromConfig(*cfg, func(options *sd.Options) { 51 | // Append the User-Agent to all requests; the format will be aws-cloud-map-mcs-controller-for-k8s/0.0.0-abc 52 | options.APIOptions = append(options.APIOptions, middleware.AddUserAgentKeyValue(version.GetUserAgentKey(), version.GetUserAgentValue())) 53 | }) 54 | return &awsFacade{sdClient} 55 | } 56 | -------------------------------------------------------------------------------- /integration/janitor/janitor_test.go: -------------------------------------------------------------------------------- 1 | package janitor 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | janitorMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/integration/janitor" 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 9 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" 10 | "github.com/aws/aws-sdk-go-v2/aws" 11 | "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" 12 | "github.com/golang/mock/gomock" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | type testJanitor struct { 17 | janitor *cloudMapJanitor 18 | mockApi *janitorMock.MockServiceDiscoveryJanitorApi 19 | failed *bool 20 | close func() 21 | } 22 | 23 | func TestNewDefaultJanitor(t *testing.T) { 24 | assert.NotNil(t, NewDefaultJanitor(test.ClusterId1, test.ClusterSet)) 25 | } 26 | 27 | func TestCleanupHappyCase(t *testing.T) { 28 | tj := getTestJanitor(t) 29 | defer tj.close() 30 | 31 | tj.mockApi.EXPECT().GetNamespaceMap(context.TODO()).
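// The chained Return on the next line completes the gomock stub: when Cleanup
// asks for the namespace map, the mock hands back the single test namespace,
// which drives the rest of the happy-path expectations below.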
61 | Return(map[string]*model.Namespace{}, nil) 62 | 63 | tj.janitor.Cleanup(context.TODO(), test.HttpNsName) 64 | assert.False(t, *tj.failed) 65 | } 66 | 67 | func getTestJanitor(t *testing.T) *testJanitor { 68 | mockController := gomock.NewController(t) 69 | api := janitorMock.NewMockServiceDiscoveryJanitorApi(mockController) 70 | failed := false 71 | return &testJanitor{ 72 | janitor: &cloudMapJanitor{ 73 | clusterId: test.ClusterId1, 74 | clusterSetId: test.ClusterSet, 75 | sdApi: api, 76 | fail: func() { failed = true }, 77 | }, 78 | mockApi: api, 79 | failed: &failed, 80 | close: func() { mockController.Finish() }, 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /pkg/model/cluster_test.go: -------------------------------------------------------------------------------- 1 | package model 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "testing" 7 | 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | 11 | aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 14 | ) 15 | 16 | func TestClusterUtils_GetClusterProperties(t *testing.T) { 17 | type fields struct { 18 | client client.Client 19 | clusterProperties clusterProperties 20 | } 21 | type args struct { 22 | ctx context.Context 23 | } 24 | clusterId := "cluster1" 25 | clusterSetId := "clusterset1" 26 | tests := []struct { 27 | name string 28 | fields fields 29 | args args 30 | want *clusterProperties 31 | wantErr bool 32 | }{ 33 | { 34 | name: "happy case fetch from client", 35 | fields: fields{ 36 | client: fake.NewClientBuilder().WithScheme(GetScheme()).WithObjects(ClusterIdForTest(clusterId), ClusterSetIdForTest(clusterSetId)).Build(), 37 | clusterProperties: clusterProperties{}, 38 | }, 39 | args: args{ctx: context.TODO()}, 40 | want: &clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId}, 41 | wantErr: false, 42 | }, 43 | { 44 | name: "happy case already set", 45 | fields: fields{ 46 | client: nil, 47 | clusterProperties: clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId}, 48 | }, 49 | args: args{ctx: context.TODO()}, 50 | want: &clusterProperties{clusterId: clusterId, clusterSetId: clusterSetId}, 51 | wantErr: false, 52 | }, 53 | { 54 | name: "error cluster properties not present", 55 | fields: fields{ 56 | client: fake.NewClientBuilder().WithScheme(GetScheme()).Build(), 57 | clusterProperties: clusterProperties{}, 58 | }, 59 | args: args{ctx: context.TODO()}, 60 | want: nil, 61 | wantErr: true, 62 | }, 63 | } 64 | for _, tt := range tests { 65 | t.Run(tt.name, func(t *testing.T) { 66 | r := &ClusterUtils{ 67 | client: tt.fields.client, 68 | clusterProperties: tt.fields.clusterProperties, 69 | } 70 | got, err := r.GetClusterProperties(tt.args.ctx) 71 | if (err != nil) != tt.wantErr { 72 | t.Errorf("GetClusterProperties() error = %v, wantErr %v", err, tt.wantErr) 73 | return 74 | } 75 | if !reflect.DeepEqual(got, tt.want) { 76 | t.Errorf("GetClusterProperties() got = %v, want %v", got, tt.want) 77 | } 78 | }) 79 | } 80 | } 81 | 82 | func ClusterIdForTest(clusterId string) *aboutv1alpha1.ClusterProperty { 83 | return &aboutv1alpha1.ClusterProperty{ 84 | ObjectMeta: metav1.ObjectMeta{ 85 | Name: ClusterIdPropertyName, 86 | }, 87 | Spec: aboutv1alpha1.ClusterPropertySpec{ 88 | Value: clusterId, 89 | }, 90 | } 91 | } 92 | 93 | func ClusterSetIdForTest(clusterSetId string) 
*aboutv1alpha1.ClusterProperty { 94 | return &aboutv1alpha1.ClusterProperty{ 95 | ObjectMeta: metav1.ObjectMeta{ 96 | Name: ClusterSetIdPropertyName, 97 | }, 98 | Spec: aboutv1alpha1.ClusterPropertySpec{ 99 | Value: clusterSetId, 100 | }, 101 | } 102 | } 103 | 104 | func GetScheme() *runtime.Scheme { 105 | scheme := runtime.NewScheme() 106 | scheme.AddKnownTypes(aboutv1alpha1.GroupVersion, &aboutv1alpha1.ClusterProperty{}, &aboutv1alpha1.ClusterPropertyList{}) 107 | return scheme 108 | } 109 | -------------------------------------------------------------------------------- /pkg/controllers/multicluster/controllers_common_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "strconv" 5 | "time" 6 | 7 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 8 | 9 | multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" 10 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" 11 | "github.com/aws/aws-sdk-go-v2/aws" 12 | v1 "k8s.io/api/core/v1" 13 | discovery "k8s.io/api/discovery/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/util/intstr" 16 | ) 17 | 18 | // Factory functions for testing 19 | 20 | func k8sNamespaceForTest() *v1.Namespace { 21 | return &v1.Namespace{ 22 | ObjectMeta: metav1.ObjectMeta{ 23 | Name: test.HttpNsName, 24 | Namespace: test.HttpNsName, 25 | }, 26 | } 27 | } 28 | 29 | func k8sServiceForTest() *v1.Service { 30 | return &v1.Service{ 31 | TypeMeta: metav1.TypeMeta{}, 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: test.SvcName, 34 | Namespace: test.HttpNsName, 35 | }, 36 | Spec: v1.ServiceSpec{ 37 | Ports: []v1.ServicePort{{ 38 | Name: test.PortName1, 39 | Protocol: test.Protocol1, 40 | Port: test.ServicePort1, 41 | TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: test.Port1}, 42 | }}, 43 | }, 44 | Status: v1.ServiceStatus{}, 45 | } 46 | } 47 | 48 | func serviceExportForTest() *multiclusterv1alpha1.ServiceExport { 49 | creationTimestamp := metav1.NewTime(time.UnixMilli(test.SvcExportCreationTimestamp)) 50 | return &multiclusterv1alpha1.ServiceExport{ 51 | ObjectMeta: metav1.ObjectMeta{ 52 | Name: test.SvcName, 53 | Namespace: test.HttpNsName, 54 | CreationTimestamp: creationTimestamp, 55 | }, 56 | } 57 | } 58 | 59 | func serviceImportForTest(svcName string) *multiclusterv1alpha1.ServiceImport { 60 | return &multiclusterv1alpha1.ServiceImport{ 61 | ObjectMeta: metav1.ObjectMeta{ 62 | Name: svcName, 63 | Namespace: test.HttpNsName, 64 | }, 65 | } 66 | } 67 | 68 | func endpointSliceForTest() *discovery.EndpointSlice { 69 | port := int32(test.Port1) 70 | protocol := v1.ProtocolTCP 71 | nodename := test.Nodename 72 | hostname := test.Hostname 73 | ready, _ := strconv.ParseBool(test.EndptReadyTrue) 74 | return &discovery.EndpointSlice{ 75 | ObjectMeta: metav1.ObjectMeta{ 76 | Namespace: test.HttpNsName, 77 | Name: test.SvcName + "-slice", 78 | Labels: map[string]string{discovery.LabelServiceName: test.SvcName}, 79 | }, 80 | AddressType: discovery.AddressTypeIPv4, 81 | Endpoints: []discovery.Endpoint{{ 82 | Addresses: []string{test.EndptIp1}, 83 | Conditions: discovery.EndpointConditions{ 84 | Ready: aws.Bool(ready), 85 | }, 86 | NodeName: &nodename, 87 | Hostname: &hostname, 88 | }}, 89 | Ports: []discovery.EndpointPort{{ 90 | Name: aws.String(test.PortName1), 91 | Protocol: &protocol, 92 | Port: &port, 93 | }}, 94 | } 95 | } 96 | 97 | func endpointSliceFromEndpointsForTest(endpts 
[]*model.Endpoint, ports []discovery.EndpointPort) *discovery.EndpointSlice { 98 | svc := k8sServiceForTest() 99 | slice := CreateEndpointSliceStruct(svc, test.SvcName, test.ClusterId1, endpts[0].AddressType) 100 | slice.Ports = ports 101 | 102 | testEndpoints := make([]discovery.Endpoint, 0) 103 | for _, endpt := range endpts { 104 | testEndpoints = append(testEndpoints, CreateEndpointForSlice(svc, endpt)) 105 | } 106 | slice.Endpoints = testEndpoints 107 | 108 | return slice 109 | } 110 | -------------------------------------------------------------------------------- /integration/janitor/janitor.go: -------------------------------------------------------------------------------- 1 | package janitor 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "os" 7 | 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" 9 | 10 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 11 | "github.com/aws/aws-sdk-go-v2/aws" 12 | "github.com/aws/aws-sdk-go-v2/config" 13 | ) 14 | 15 | // CloudMapJanitor handles AWS Cloud Map resource cleanup during integration tests. 16 | type CloudMapJanitor interface { 17 | // Cleanup removes all instances, services and the namespace from AWS Cloud Map for a given namespace name. 18 | Cleanup(ctx context.Context, nsName string) 19 | } 20 | 21 | type cloudMapJanitor struct { 22 | clusterId string 23 | clusterSetId string 24 | sdApi ServiceDiscoveryJanitorApi 25 | fail func() 26 | } 27 | 28 | // NewDefaultJanitor returns a new janitor object. 29 | func NewDefaultJanitor(clusterId string, clusterSetId string) CloudMapJanitor { 30 | awsCfg, err := config.LoadDefaultConfig(context.TODO()) 31 | 32 | if err != nil { 33 | fmt.Printf("unable to configure AWS session: %s", err.Error()) 34 | os.Exit(1) 35 | } 36 | 37 | return &cloudMapJanitor{ 38 | clusterId: clusterId, 39 | clusterSetId: clusterSetId, 40 | sdApi: NewServiceDiscoveryJanitorApiFromConfig(&awsCfg), 41 | fail: func() { os.Exit(1) }, 42 | } 43 | } 44 | 45 | func (j *cloudMapJanitor) Cleanup(ctx context.Context, nsName string) { 46 | fmt.Printf("Cleaning up all test resources in Cloud Map for namespace : %s\n", nsName) 47 | 48 | nsMap, err := j.sdApi.GetNamespaceMap(ctx) 49 | j.checkOrFail(err, "", "could not find namespace to clean") 50 | 51 | ns, found := nsMap[nsName] 52 | if !found { 53 | fmt.Println("namespace does not exist in account, nothing to clean") 54 | return 55 | } 56 | 57 | fmt.Printf("found namespace to clean: %s\n", ns.Id) 58 | 59 | svcIdMap, err := j.sdApi.GetServiceIdMap(ctx, ns.Id) 60 | j.checkOrFail(err, 61 | fmt.Sprintf("namespace has %d services to clean", len(svcIdMap)), 62 | "could not find services to clean") 63 | 64 | for svcName, svcId := range svcIdMap { 65 | fmt.Printf("found service to clean: %s\n", svcId) 66 | j.deregisterInstances(ctx, nsName, svcName, svcId) 67 | 68 | delSvcErr := j.sdApi.DeleteService(ctx, svcId) 69 | j.checkOrFail(delSvcErr, "service deleted", "could not cleanup service") 70 | } 71 | 72 | opId, err := j.sdApi.DeleteNamespace(ctx, ns.Id) 73 | if err == nil { 74 | fmt.Println("namespace delete in progress") 75 | _, err = cloudmap.NewOperationPoller(j.sdApi).Poll(ctx, opId) 76 | } 77 | j.checkOrFail(err, "clean up successful", "could not cleanup namespace") 78 | } 79 | 80 | func (j *cloudMapJanitor) deregisterInstances(ctx context.Context, nsName string, svcName string, svcId string) { 81 | queryParameters := map[string]string{ 82 | model.ClusterSetIdAttr: j.clusterSetId, 83 | } 84 | 85 | insts, err := j.sdApi.DiscoverInstances(ctx, nsName, 
svcName, queryParameters) 86 | j.checkOrFail(err, 87 | fmt.Sprintf("service has %d instances to clean", len(insts)), 88 | "could not list instances to cleanup") 89 | 90 | opPoller := cloudmap.NewOperationPoller(j.sdApi) 91 | for _, inst := range insts { 92 | instId := aws.ToString(inst.InstanceId) 93 | fmt.Printf("found instance to clean: %s\n", instId) 94 | opPoller.Submit(ctx, func() (opId string, err error) { 95 | return j.sdApi.DeregisterInstance(ctx, svcId, instId) 96 | }) 97 | } 98 | 99 | err = opPoller.Await() 100 | j.checkOrFail(err, "instances de-registered", "could not cleanup instances") 101 | } 102 | 103 | func (j *cloudMapJanitor) checkOrFail(err error, successMsg string, failMsg string) { 104 | if err != nil { 105 | fmt.Printf("%s: %s\n", failMsg, err.Error()) 106 | j.fail() 107 | } 108 | 109 | if successMsg != "" { 110 | fmt.Println(successMsg) 111 | } 112 | } 113 | -------------------------------------------------------------------------------- /pkg/apis/about/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | //go:build !ignore_autogenerated 2 | // +build !ignore_autogenerated 3 | 4 | /* 5 | 6 | 7 | Licensed under the Apache License, Version 2.0 (the "License"); 8 | you may not use this file except in compliance with the License. 9 | You may obtain a copy of the License at 10 | 11 | http://www.apache.org/licenses/LICENSE-2.0 12 | 13 | Unless required by applicable law or agreed to in writing, software 14 | distributed under the License is distributed on an "AS IS" BASIS, 15 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | See the License for the specific language governing permissions and 17 | limitations under the License. 18 | */ 19 | 20 | // Code generated by controller-gen. DO NOT EDIT. 21 | 22 | package v1alpha1 23 | 24 | import ( 25 | runtime "k8s.io/apimachinery/pkg/runtime" 26 | ) 27 | 28 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 29 | func (in *ClusterProperty) DeepCopyInto(out *ClusterProperty) { 30 | *out = *in 31 | out.TypeMeta = in.TypeMeta 32 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 33 | out.Spec = in.Spec 34 | out.Status = in.Status 35 | } 36 | 37 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterProperty. 38 | func (in *ClusterProperty) DeepCopy() *ClusterProperty { 39 | if in == nil { 40 | return nil 41 | } 42 | out := new(ClusterProperty) 43 | in.DeepCopyInto(out) 44 | return out 45 | } 46 | 47 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 48 | func (in *ClusterProperty) DeepCopyObject() runtime.Object { 49 | if c := in.DeepCopy(); c != nil { 50 | return c 51 | } 52 | return nil 53 | } 54 | 55 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 56 | func (in *ClusterPropertyList) DeepCopyInto(out *ClusterPropertyList) { 57 | *out = *in 58 | out.TypeMeta = in.TypeMeta 59 | in.ListMeta.DeepCopyInto(&out.ListMeta) 60 | if in.Items != nil { 61 | in, out := &in.Items, &out.Items 62 | *out = make([]ClusterProperty, len(*in)) 63 | for i := range *in { 64 | (*in)[i].DeepCopyInto(&(*out)[i]) 65 | } 66 | } 67 | } 68 | 69 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropertyList. 
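// (These generated helpers are what satisfy runtime.Object for the CRD types;
// controller-runtime depends on them to hand out independent copies of
// objects from its shared cache.)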
70 | func (in *ClusterPropertyList) DeepCopy() *ClusterPropertyList { 71 | if in == nil { 72 | return nil 73 | } 74 | out := new(ClusterPropertyList) 75 | in.DeepCopyInto(out) 76 | return out 77 | } 78 | 79 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 80 | func (in *ClusterPropertyList) DeepCopyObject() runtime.Object { 81 | if c := in.DeepCopy(); c != nil { 82 | return c 83 | } 84 | return nil 85 | } 86 | 87 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 88 | func (in *ClusterPropertySpec) DeepCopyInto(out *ClusterPropertySpec) { 89 | *out = *in 90 | } 91 | 92 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropertySpec. 93 | func (in *ClusterPropertySpec) DeepCopy() *ClusterPropertySpec { 94 | if in == nil { 95 | return nil 96 | } 97 | out := new(ClusterPropertySpec) 98 | in.DeepCopyInto(out) 99 | return out 100 | } 101 | 102 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 103 | func (in *ClusterPropertyStatus) DeepCopyInto(out *ClusterPropertyStatus) { 104 | *out = *in 105 | } 106 | 107 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPropertyStatus. 108 | func (in *ClusterPropertyStatus) DeepCopy() *ClusterPropertyStatus { 109 | if in == nil { 110 | return nil 111 | } 112 | out := new(ClusterPropertyStatus) 113 | in.DeepCopyInto(out) 114 | return out 115 | } 116 | -------------------------------------------------------------------------------- /pkg/cloudmap/operation_poller.go: -------------------------------------------------------------------------------- 1 | package cloudmap 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "sync" 7 | "time" 8 | 9 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" 10 | "github.com/aws/aws-sdk-go-v2/aws" 11 | "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" 12 | "k8s.io/apimachinery/pkg/util/wait" 13 | ) 14 | 15 | const ( 16 | // Interval between each getOperation call. 
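// (With the defaults below, an operation is checked at most about 30 times:
// a 1 minute poll timeout divided by one GetOperation call every 2 seconds,
// after which Poll gives up with the timeout error defined further down.)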
17 | defaultOperationPollInterval = 2 * time.Second 18 | 19 | // Time until we stop polling the operation 20 | defaultOperationPollTimeout = 1 * time.Minute 21 | 22 | operationPollTimeoutErrorMessage = "timed out while polling operations" 23 | ) 24 | 25 | // OperationPoller polls a list of operations for a terminal status 26 | type OperationPoller interface { 27 | // Submit operations to async poll 28 | Submit(ctx context.Context, opProvider func() (opId string, err error)) 29 | 30 | // Poll operations for a terminal state 31 | Poll(ctx context.Context, opId string) (*types.Operation, error) 32 | 33 | // Await waits for all operation results from async poll 34 | Await() (err error) 35 | } 36 | 37 | type operationPoller struct { 38 | log common.Logger 39 | sdApi ServiceDiscoveryApi 40 | opChan chan opResult 41 | waitGroup sync.WaitGroup 42 | pollInterval time.Duration 43 | pollTimeout time.Duration 44 | } 45 | 46 | type opResult struct { 47 | opId string 48 | err error 49 | } 50 | 51 | // NewOperationPoller creates a new operation poller 52 | func NewOperationPoller(sdApi ServiceDiscoveryApi) OperationPoller { 53 | return NewOperationPollerWithConfig(defaultOperationPollInterval, defaultOperationPollTimeout, sdApi) 54 | } 55 | 56 | // NewOperationPollerWithConfig creates a new operation poller 57 | func NewOperationPollerWithConfig(pollInterval, pollTimeout time.Duration, sdApi ServiceDiscoveryApi) OperationPoller { 58 | return &operationPoller{ 59 | log: common.NewLogger("cloudmap", "OperationPoller"), 60 | sdApi: sdApi, 61 | opChan: make(chan opResult), 62 | pollInterval: pollInterval, 63 | pollTimeout: pollTimeout, 64 | } 65 | } 66 | 67 | func (p *operationPoller) Submit(ctx context.Context, opProvider func() (opId string, err error)) { 68 | p.waitGroup.Add(1) 69 | 70 | // Poll for the operation in a separate goroutine 71 | go func() { 72 | // Indicate that polling is done, i.e. decrement the WaitGroup counter, when the goroutine returns 73 | defer p.waitGroup.Done() 74 | 75 | opId, err := opProvider() 76 | // Poll for the operationId if the provider doesn't return an error 77 | if err == nil { 78 | _, err = p.Poll(ctx, opId) 79 | } 80 | 81 | p.opChan <- opResult{opId: opId, err: err} 82 | }() 83 | } 84 | 85 | func (p *operationPoller) Poll(ctx context.Context, opId string) (op *types.Operation, err error) { 86 | // poll tries a condition func until it returns true, an error, or the timeout is reached. 87 | err = wait.Poll(p.pollInterval, p.pollTimeout, func() (done bool, err error) { 88 | p.log.Info("polling operation", "opId", opId) 89 | 90 | op, err = p.sdApi.GetOperation(ctx, opId) 91 | if err != nil { 92 | return true, err 93 | } 94 | 95 | switch op.Status { 96 | case types.OperationStatusSuccess: 97 | return true, nil 98 | case types.OperationStatusFail: 99 | return true, fmt.Errorf("operation failed, opId: %s, reason: %s", opId, aws.ToString(op.ErrorMessage)) 100 | default: 101 | return false, nil 102 | } 103 | }) 104 | if err == wait.ErrWaitTimeout { 105 | err = fmt.Errorf("%s, opId: %s", operationPollTimeoutErrorMessage, opId) 106 | } 107 | 108 | return op, err 109 | } 110 | 111 | func (p *operationPoller) Await() (err error) { 112 | // Run wait in a separate goroutine to unblock reading from the channel. 113 | go func() { 114 | // Block till all polling is done, i.e. the
WaitGroup counter is zero, and then close the channel 115 | p.waitGroup.Wait() 116 | close(p.opChan) 117 | }() 118 | 119 | for res := range p.opChan { 120 | if res.err != nil { 121 | p.log.Error(res.err, "operation failed", "opId", res.opId) 122 | err = common.Wrap(err, res.err) 123 | } else { 124 | p.log.Info("operations completed successfully", "opId", res.opId) 125 | } 126 | } 127 | 128 | return err 129 | } 130 | -------------------------------------------------------------------------------- /samples/coredns-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | eks.amazonaws.com/component: coredns 6 | k8s-app: kube-dns 7 | kubernetes.io/name: CoreDNS 8 | name: coredns 9 | namespace: kube-system 10 | spec: 11 | progressDeadlineSeconds: 600 12 | replicas: 2 13 | revisionHistoryLimit: 10 14 | selector: 15 | matchLabels: 16 | eks.amazonaws.com/component: coredns 17 | k8s-app: kube-dns 18 | strategy: 19 | rollingUpdate: 20 | maxSurge: 25% 21 | maxUnavailable: 1 22 | type: RollingUpdate 23 | template: 24 | metadata: 25 | annotations: 26 | eks.amazonaws.com/compute-type: ec2 27 | creationTimestamp: null 28 | labels: 29 | eks.amazonaws.com/component: coredns 30 | k8s-app: kube-dns 31 | spec: 32 | affinity: 33 | nodeAffinity: 34 | requiredDuringSchedulingIgnoredDuringExecution: 35 | nodeSelectorTerms: 36 | - matchExpressions: 37 | - key: beta.kubernetes.io/os 38 | operator: In 39 | values: 40 | - linux 41 | - key: beta.kubernetes.io/arch 42 | operator: In 43 | values: 44 | - amd64 45 | - arm64 46 | podAntiAffinity: 47 | preferredDuringSchedulingIgnoredDuringExecution: 48 | - podAffinityTerm: 49 | labelSelector: 50 | matchExpressions: 51 | - key: k8s-app 52 | operator: In 53 | values: 54 | - kube-dns 55 | topologyKey: kubernetes.io/hostname 56 | weight: 100 57 | containers: 58 | - args: 59 | - -conf 60 | - /etc/coredns/Corefile 61 | image: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s/coredns-multicluster/coredns:v1.8.6 62 | imagePullPolicy: IfNotPresent 63 | livenessProbe: 64 | failureThreshold: 5 65 | httpGet: 66 | path: /health 67 | port: 8080 68 | scheme: HTTP 69 | initialDelaySeconds: 60 70 | periodSeconds: 10 71 | successThreshold: 1 72 | timeoutSeconds: 5 73 | name: coredns 74 | ports: 75 | - containerPort: 53 76 | name: dns 77 | protocol: UDP 78 | - containerPort: 53 79 | name: dns-tcp 80 | protocol: TCP 81 | - containerPort: 9153 82 | name: metrics 83 | protocol: TCP 84 | readinessProbe: 85 | failureThreshold: 3 86 | httpGet: 87 | path: /health 88 | port: 8080 89 | scheme: HTTP 90 | periodSeconds: 10 91 | successThreshold: 1 92 | timeoutSeconds: 1 93 | resources: 94 | limits: 95 | memory: 170Mi 96 | requests: 97 | cpu: 100m 98 | memory: 70Mi 99 | securityContext: 100 | allowPrivilegeEscalation: false 101 | capabilities: 102 | add: 103 | - NET_BIND_SERVICE 104 | drop: 105 | - all 106 | readOnlyRootFilesystem: true 107 | terminationMessagePath: /dev/termination-log 108 | terminationMessagePolicy: File 109 | volumeMounts: 110 | - mountPath: /etc/coredns 111 | name: config-volume 112 | readOnly: true 113 | - mountPath: /tmp 114 | name: tmp 115 | dnsPolicy: Default 116 | priorityClassName: system-cluster-critical 117 | restartPolicy: Always 118 | schedulerName: default-scheduler 119 | securityContext: {} 120 | serviceAccount: coredns 121 | serviceAccountName: coredns 122 | terminationGracePeriodSeconds: 30 123 | tolerations: 124 | - effect: NoSchedule 125 | key: 
node-role.kubernetes.io/master 126 | - key: CriticalAddonsOnly 127 | operator: Exists 128 | volumes: 129 | - emptyDir: {} 130 | name: tmp 131 | - configMap: 132 | defaultMode: 420 133 | items: 134 | - key: Corefile 135 | path: Corefile 136 | name: coredns 137 | name: config-volume 138 | -------------------------------------------------------------------------------- /integration/eks-test/configs/coredns-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | eks.amazonaws.com/component: coredns 6 | k8s-app: kube-dns 7 | kubernetes.io/name: CoreDNS 8 | name: coredns 9 | namespace: kube-system 10 | spec: 11 | progressDeadlineSeconds: 600 12 | replicas: 2 13 | revisionHistoryLimit: 10 14 | selector: 15 | matchLabels: 16 | eks.amazonaws.com/component: coredns 17 | k8s-app: kube-dns 18 | strategy: 19 | rollingUpdate: 20 | maxSurge: 25% 21 | maxUnavailable: 1 22 | type: RollingUpdate 23 | template: 24 | metadata: 25 | annotations: 26 | eks.amazonaws.com/compute-type: ec2 27 | creationTimestamp: null 28 | labels: 29 | eks.amazonaws.com/component: coredns 30 | k8s-app: kube-dns 31 | spec: 32 | affinity: 33 | nodeAffinity: 34 | requiredDuringSchedulingIgnoredDuringExecution: 35 | nodeSelectorTerms: 36 | - matchExpressions: 37 | - key: beta.kubernetes.io/os 38 | operator: In 39 | values: 40 | - linux 41 | - key: beta.kubernetes.io/arch 42 | operator: In 43 | values: 44 | - amd64 45 | - arm64 46 | podAntiAffinity: 47 | preferredDuringSchedulingIgnoredDuringExecution: 48 | - podAffinityTerm: 49 | labelSelector: 50 | matchExpressions: 51 | - key: k8s-app 52 | operator: In 53 | values: 54 | - kube-dns 55 | topologyKey: kubernetes.io/hostname 56 | weight: 100 57 | containers: 58 | - args: 59 | - -conf 60 | - /etc/coredns/Corefile 61 | image: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s/coredns-multicluster/coredns:v1.8.6 62 | imagePullPolicy: IfNotPresent 63 | livenessProbe: 64 | failureThreshold: 5 65 | httpGet: 66 | path: /health 67 | port: 8080 68 | scheme: HTTP 69 | initialDelaySeconds: 60 70 | periodSeconds: 10 71 | successThreshold: 1 72 | timeoutSeconds: 5 73 | name: coredns 74 | ports: 75 | - containerPort: 53 76 | name: dns 77 | protocol: UDP 78 | - containerPort: 53 79 | name: dns-tcp 80 | protocol: TCP 81 | - containerPort: 9153 82 | name: metrics 83 | protocol: TCP 84 | readinessProbe: 85 | failureThreshold: 3 86 | httpGet: 87 | path: /health 88 | port: 8080 89 | scheme: HTTP 90 | periodSeconds: 10 91 | successThreshold: 1 92 | timeoutSeconds: 1 93 | resources: 94 | limits: 95 | memory: 170Mi 96 | requests: 97 | cpu: 100m 98 | memory: 70Mi 99 | securityContext: 100 | allowPrivilegeEscalation: false 101 | capabilities: 102 | add: 103 | - NET_BIND_SERVICE 104 | drop: 105 | - all 106 | readOnlyRootFilesystem: true 107 | terminationMessagePath: /dev/termination-log 108 | terminationMessagePolicy: File 109 | volumeMounts: 110 | - mountPath: /etc/coredns 111 | name: config-volume 112 | readOnly: true 113 | - mountPath: /tmp 114 | name: tmp 115 | dnsPolicy: Default 116 | priorityClassName: system-cluster-critical 117 | restartPolicy: Always 118 | schedulerName: default-scheduler 119 | securityContext: {} 120 | serviceAccount: coredns 121 | serviceAccountName: coredns 122 | terminationGracePeriodSeconds: 30 123 | tolerations: 124 | - effect: NoSchedule 125 | key: node-role.kubernetes.io/master 126 | - key: CriticalAddonsOnly 127 | operator: Exists 128 | volumes: 129 | - emptyDir: 
{} 130 | name: tmp 131 | - configMap: 132 | defaultMode: 420 133 | items: 134 | - key: Corefile 135 | path: Corefile 136 | name: coredns 137 | name: config-volume -------------------------------------------------------------------------------- /integration/kind-test/configs/coredns-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | # eks.amazonaws.com/component: coredns 6 | k8s-app: kube-dns 7 | kubernetes.io/name: CoreDNS 8 | name: coredns 9 | namespace: kube-system 10 | spec: 11 | progressDeadlineSeconds: 600 12 | replicas: 2 13 | revisionHistoryLimit: 10 14 | selector: 15 | matchLabels: 16 | # eks.amazonaws.com/component: coredns 17 | k8s-app: kube-dns 18 | strategy: 19 | rollingUpdate: 20 | maxSurge: 25% 21 | maxUnavailable: 1 22 | type: RollingUpdate 23 | template: 24 | metadata: 25 | # annotations: 26 | # eks.amazonaws.com/compute-type: ec2 27 | creationTimestamp: null 28 | labels: 29 | # eks.amazonaws.com/component: coredns 30 | k8s-app: kube-dns 31 | spec: 32 | affinity: 33 | nodeAffinity: 34 | requiredDuringSchedulingIgnoredDuringExecution: 35 | nodeSelectorTerms: 36 | - matchExpressions: 37 | - key: beta.kubernetes.io/os 38 | operator: In 39 | values: 40 | - linux 41 | - key: beta.kubernetes.io/arch 42 | operator: In 43 | values: 44 | - amd64 45 | - arm64 46 | podAntiAffinity: 47 | preferredDuringSchedulingIgnoredDuringExecution: 48 | - podAffinityTerm: 49 | labelSelector: 50 | matchExpressions: 51 | - key: k8s-app 52 | operator: In 53 | values: 54 | - kube-dns 55 | topologyKey: kubernetes.io/hostname 56 | weight: 100 57 | containers: 58 | - args: 59 | - -conf 60 | - /etc/coredns/Corefile 61 | image: ghcr.io/aws/aws-cloud-map-mcs-controller-for-k8s/coredns-multicluster/coredns:v1.8.6 62 | imagePullPolicy: IfNotPresent 63 | livenessProbe: 64 | failureThreshold: 5 65 | httpGet: 66 | path: /health 67 | port: 8080 68 | scheme: HTTP 69 | initialDelaySeconds: 60 70 | periodSeconds: 10 71 | successThreshold: 1 72 | timeoutSeconds: 5 73 | name: coredns 74 | ports: 75 | - containerPort: 53 76 | name: dns 77 | protocol: UDP 78 | - containerPort: 53 79 | name: dns-tcp 80 | protocol: TCP 81 | - containerPort: 9153 82 | name: metrics 83 | protocol: TCP 84 | readinessProbe: 85 | failureThreshold: 3 86 | httpGet: 87 | path: /health 88 | port: 8080 89 | scheme: HTTP 90 | periodSeconds: 10 91 | successThreshold: 1 92 | timeoutSeconds: 1 93 | resources: 94 | limits: 95 | memory: 170Mi 96 | requests: 97 | cpu: 100m 98 | memory: 70Mi 99 | securityContext: 100 | allowPrivilegeEscalation: false 101 | capabilities: 102 | add: 103 | - NET_BIND_SERVICE 104 | drop: 105 | - all 106 | readOnlyRootFilesystem: true 107 | terminationMessagePath: /dev/termination-log 108 | terminationMessagePolicy: File 109 | volumeMounts: 110 | - mountPath: /etc/coredns 111 | name: config-volume 112 | readOnly: true 113 | - mountPath: /tmp 114 | name: tmp 115 | dnsPolicy: Default 116 | priorityClassName: system-cluster-critical 117 | restartPolicy: Always 118 | schedulerName: default-scheduler 119 | securityContext: {} 120 | serviceAccount: coredns 121 | serviceAccountName: coredns 122 | terminationGracePeriodSeconds: 30 123 | tolerations: 124 | - effect: NoSchedule 125 | key: node-role.kubernetes.io/master 126 | - key: CriticalAddonsOnly 127 | operator: Exists 128 | volumes: 129 | - emptyDir: {} 130 | name: tmp 131 | - configMap: 132 | defaultMode: 420 133 | items: 134 | - key: Corefile 135 | path: 
Corefile 136 | name: coredns 137 | name: config-volume 138 | -------------------------------------------------------------------------------- /integration/shared/scenarios/export_service.go: -------------------------------------------------------------------------------- 1 | package scenarios 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | "time" 9 | 10 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" 11 | 12 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" 13 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 14 | "github.com/aws/aws-sdk-go-v2/aws" 15 | v1 "k8s.io/api/core/v1" 16 | "k8s.io/apimachinery/pkg/util/wait" 17 | ) 18 | 19 | const ( 20 | defaultScenarioPollInterval = 10 * time.Second 21 | defaultScenarioPollTimeout = 2 * time.Minute 22 | ) 23 | 24 | // ExportServiceScenario defines an integration test against a service export to check creation of namespace, service, 25 | // and endpoint export. 26 | type ExportServiceScenario interface { 27 | // Run executes the service export integration test scenario, returning any error. 28 | Run() error 29 | } 30 | 31 | type exportServiceScenario struct { 32 | sdClient cloudmap.ServiceDiscoveryClient 33 | expectedSvc model.Service 34 | } 35 | 36 | func NewExportServiceScenario(cfg *aws.Config, nsName string, svcName string, clusterId string, clusterSetId string, portStr string, servicePortStr string, serviceType string, addressTypeStr string, ips string) (ExportServiceScenario, error) { 37 | endpts := make([]*model.Endpoint, 0) 38 | 39 | port, parseError := strconv.ParseUint(portStr, 10, 16) 40 | if parseError != nil { 41 | return nil, parseError 42 | } 43 | servicePort, parseError := strconv.ParseUint(servicePortStr, 10, 16) 44 | if parseError != nil { 45 | return nil, parseError 46 | } 47 | addressType, parseError := model.GetAddressTypeFromString(addressTypeStr) 48 | if parseError != nil { 49 | return nil, parseError 50 | } 51 | 52 | for _, ip := range strings.Split(ips, ",") { 53 | endpointPort := model.Port{ 54 | Port: int32(port), 55 | Protocol: string(v1.ProtocolTCP), 56 | } 57 | endpts = append(endpts, &model.Endpoint{ 58 | Id: model.EndpointIdFromIPAddressAndPort(ip, endpointPort), 59 | IP: ip, 60 | AddressType: addressType, 61 | ServicePort: model.Port{ 62 | Port: int32(servicePort), 63 | TargetPort: portStr, 64 | Protocol: string(v1.ProtocolTCP), 65 | }, 66 | Ready: true, 67 | EndpointPort: endpointPort, 68 | ClusterId: clusterId, 69 | ClusterSetId: clusterSetId, 70 | ServiceType: model.ServiceType(serviceType), 71 | Attributes: make(map[string]string), 72 | }) 73 | } 74 | 75 | return &exportServiceScenario{ 76 | sdClient: cloudmap.NewServiceDiscoveryClientWithCustomCache(cfg, 77 | &cloudmap.SdCacheConfig{ 78 | NsTTL: time.Second, 79 | SvcTTL: time.Second, 80 | EndptTTL: time.Second, 81 | }, model.NewClusterUtilsWithValues(clusterId, clusterSetId)), 82 | expectedSvc: model.Service{ 83 | Namespace: nsName, 84 | Name: svcName, 85 | Endpoints: endpts, 86 | }, 87 | }, nil 88 | } 89 | 90 | func (e *exportServiceScenario) Run() error { 91 | fmt.Printf("Seeking expected service: %v\n", e.expectedSvc) 92 | 93 | return wait.Poll(defaultScenarioPollInterval, defaultScenarioPollTimeout, func() (done bool, err error) { 94 | fmt.Println("Polling service...") 95 | cmSvc, err := e.sdClient.GetService(context.TODO(), e.expectedSvc.Namespace, e.expectedSvc.Name) 96 | if common.IsUnknown(err) { 97 | return true, err 98 | } 99 | 100 | if common.IsNotFound(err) { 101 | 
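// Returning (false, nil) from this branch tells wait.Poll to retry after the
// poll interval; only an unknown error (handled above) ends the scenario
// early, so a service that has not propagated to Cloud Map yet is retried.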
fmt.Println("Service not found.") 102 | return false, nil 103 | } 104 | 105 | fmt.Printf("Found service: %+v\n", cmSvc) 106 | return e.compareEndpoints(cmSvc.Endpoints), nil 107 | }) 108 | } 109 | 110 | func (e *exportServiceScenario) compareEndpoints(cmEndpoints []*model.Endpoint) bool { 111 | if len(e.expectedSvc.Endpoints) != len(cmEndpoints) { 112 | fmt.Println("Endpoints do not match.") 113 | return false 114 | } 115 | 116 | for _, expected := range e.expectedSvc.Endpoints { 117 | match := false 118 | for _, actual := range cmEndpoints { 119 | // Ignore K8S instance attribute for the purpose of this test. 120 | delete(actual.Attributes, model.K8sVersionAttr) 121 | // Ignore ServiceExportCreationTimestamp attribute for the purpose of this test by setting value to 0. 122 | actual.ServiceExportCreationTimestamp = 0 123 | // Ignore Nodename and Hostname, as they can be platform dependent 124 | actual.Nodename = "" 125 | actual.Hostname = "" 126 | if expected.Equals(actual) { 127 | match = true 128 | break 129 | } 130 | } 131 | if !match { 132 | fmt.Println("Endpoints do not match.") 133 | return false 134 | } 135 | } 136 | 137 | fmt.Println("Endpoints match.") 138 | return true 139 | } 140 | -------------------------------------------------------------------------------- /pkg/apis/multicluster/v1alpha1/serviceimport_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | v1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // +genclient 9 | // +kubebuilder:object:root=true 10 | 11 | // ServiceImport describes a service imported from clusters in a ClusterSet. 12 | type ServiceImport struct { 13 | metav1.TypeMeta `json:",inline"` 14 | // +optional 15 | metav1.ObjectMeta `json:"metadata,omitempty"` 16 | // spec defines the behavior of a ServiceImport. 17 | // +optional 18 | Spec ServiceImportSpec `json:"spec,omitempty"` 19 | // status contains information about the exported services that form 20 | // the multi-cluster service referenced by this ServiceImport. 21 | // +optional 22 | Status ServiceImportStatus `json:"status,omitempty"` 23 | } 24 | 25 | // ServiceImportType designates the type of a ServiceImport 26 | type ServiceImportType string 27 | 28 | const ( 29 | // ClusterSetIP are only accessible via the ClusterSet IP. 30 | ClusterSetIP ServiceImportType = "ClusterSetIP" 31 | // Headless services allow backend pods to be addressed directly. 32 | Headless ServiceImportType = "Headless" 33 | ) 34 | 35 | // ServiceImportSpec describes an imported service and the information necessary to consume it. 36 | type ServiceImportSpec struct { 37 | // +listType=atomic 38 | Ports []ServicePort `json:"ports"` 39 | // ip will be used as the VIP for this service when type is ClusterSetIP. 40 | // +optional 41 | IPs []string `json:"ips,omitempty"` 42 | // type defines the type of this service. 43 | // Must be ClusterSetIP or Headless. 44 | // +kubebuilder:validation:Enum=ClusterSetIP;Headless 45 | Type ServiceImportType `json:"type"` 46 | // Supports "ClientIP" and "None". Used to maintain session affinity. 47 | // Enable client IP based session affinity. 48 | // Must be ClientIP or None. 49 | // Defaults to None. 
50 | // Ignored when type is Headless 51 | // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies 52 | // +optional 53 | SessionAffinity v1.ServiceAffinity `json:"sessionAffinity,omitempty"` 54 | // sessionAffinityConfig contains session affinity configuration. 55 | // +optional 56 | SessionAffinityConfig *v1.SessionAffinityConfig `json:"sessionAffinityConfig,omitempty"` 57 | } 58 | 59 | // ServicePort represents the port on which the service is exposed 60 | type ServicePort struct { 61 | // The name of this port within the service. This must be a DNS_LABEL. 62 | // All ports within a ServiceSpec must have unique names. When considering 63 | // the endpoints for a Service, this must match the 'name' field in the 64 | // EndpointPort. 65 | // Optional if only one ServicePort is defined on this service. 66 | // +optional 67 | Name string `json:"name,omitempty"` 68 | 69 | // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". 70 | // Default is TCP. 71 | // +optional 72 | Protocol v1.Protocol `json:"protocol,omitempty"` 73 | 74 | // The application protocol for this port. 75 | // This field follows standard Kubernetes label syntax. 76 | // Un-prefixed names are reserved for IANA standard service names (as per 77 | // RFC-6335 and http://www.iana.org/assignments/service-names). 78 | // Non-standard protocols should use prefixed names such as 79 | // mycompany.com/my-custom-protocol. 80 | // Field can be enabled with ServiceAppProtocol feature gate. 81 | // +optional 82 | AppProtocol *string `json:"appProtocol,omitempty"` 83 | 84 | // The port that will be exposed by this service. 85 | Port int32 `json:"port"` 86 | } 87 | 88 | // ServiceImportStatus describes derived state of an imported service. 89 | type ServiceImportStatus struct { 90 | // clusters is the list of exporting clusters from which this service 91 | // was derived. 92 | // +optional 93 | // +patchStrategy=merge 94 | // +patchMergeKey=cluster 95 | // +listType=map 96 | // +listMapKey=cluster 97 | Clusters []ClusterStatus `json:"clusters,omitempty"` 98 | } 99 | 100 | // ClusterStatus contains service configuration mapped to a specific source cluster 101 | type ClusterStatus struct { 102 | // cluster is the name of the exporting cluster. Must be a valid RFC-1123 DNS 103 | // label. 104 | Cluster string `json:"cluster"` 105 | } 106 | 107 | // +kubebuilder:object:root=true 108 | 109 | // ServiceImportList represents a list of ServiceImports 110 | type ServiceImportList struct { 111 | metav1.TypeMeta `json:",inline"` 112 | // Standard list metadata.
113 | // +optional 114 | metav1.ListMeta `json:"metadata,omitempty"` 115 | // List of ServiceImports 116 | // +listType=set 117 | Items []ServiceImport `json:"items"` 118 | } 119 | 120 | func init() { 121 | SchemeBuilder.Register(&ServiceImport{}, &ServiceImportList{}) 122 | } 123 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/aws/aws-cloud-map-mcs-controller-for-k8s 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go-v2 v1.22.0 7 | github.com/aws/aws-sdk-go-v2/config v1.20.0 8 | github.com/aws/aws-sdk-go-v2/service/servicediscovery v1.21.0 9 | github.com/go-logr/logr v1.2.4 10 | github.com/golang/mock v1.6.0 11 | github.com/google/go-cmp v0.5.9 12 | github.com/pkg/errors v0.9.1 13 | github.com/stretchr/testify v1.8.4 14 | golang.org/x/time v0.3.0 15 | k8s.io/api v0.24.3 16 | k8s.io/apimachinery v0.24.3 17 | k8s.io/client-go v0.24.2 18 | sigs.k8s.io/controller-runtime v0.12.3 19 | ) 20 | 21 | require ( 22 | cloud.google.com/go v0.81.0 // indirect 23 | github.com/Azure/go-autorest v14.2.0+incompatible // indirect 24 | github.com/Azure/go-autorest/autorest v0.11.18 // indirect 25 | github.com/Azure/go-autorest/autorest/adal v0.9.13 // indirect 26 | github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect 27 | github.com/Azure/go-autorest/logger v0.2.1 // indirect 28 | github.com/Azure/go-autorest/tracing v0.6.0 // indirect 29 | github.com/PuerkitoBio/purell v1.1.1 // indirect 30 | github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect 31 | github.com/aws/aws-sdk-go-v2/credentials v1.14.0 // indirect 32 | github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.0 // indirect 33 | github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.0 // indirect 34 | github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.0 // indirect 35 | github.com/aws/aws-sdk-go-v2/internal/ini v1.4.0 // indirect 36 | github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.0 // indirect 37 | github.com/aws/aws-sdk-go-v2/service/sso v1.16.0 // indirect 38 | github.com/aws/aws-sdk-go-v2/service/ssooidc v1.18.0 // indirect 39 | github.com/aws/aws-sdk-go-v2/service/sts v1.24.0 // indirect 40 | github.com/aws/smithy-go v1.16.0 // indirect 41 | github.com/beorn7/perks v1.0.1 // indirect 42 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 43 | github.com/davecgh/go-spew v1.1.1 // indirect 44 | github.com/emicklei/go-restful v2.16.0+incompatible // indirect 45 | github.com/evanphx/json-patch v4.12.0+incompatible // indirect 46 | github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect 47 | github.com/fsnotify/fsnotify v1.5.1 // indirect 48 | github.com/go-logr/zapr v1.2.0 // indirect 49 | github.com/go-openapi/jsonpointer v0.19.5 // indirect 50 | github.com/go-openapi/jsonreference v0.19.5 // indirect 51 | github.com/go-openapi/swag v0.19.14 // indirect 52 | github.com/gogo/protobuf v1.3.2 // indirect 53 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 54 | github.com/golang/protobuf v1.5.2 // indirect 55 | github.com/google/gnostic v0.5.7-v3refs // indirect 56 | github.com/google/gofuzz v1.1.0 // indirect 57 | github.com/google/uuid v1.1.2 // indirect 58 | github.com/imdario/mergo v0.3.12 // indirect 59 | github.com/josharian/intern v1.0.0 // indirect 60 | github.com/json-iterator/go v1.1.12 // indirect 61 | github.com/mailru/easyjson v0.7.6 // indirect 62 |
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect 63 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 64 | github.com/modern-go/reflect2 v1.0.2 // indirect 65 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 66 | github.com/onsi/gomega v1.20.2 // indirect 67 | github.com/pmezard/go-difflib v1.0.0 // indirect 68 | github.com/prometheus/client_golang v1.12.1 // indirect 69 | github.com/prometheus/client_model v0.2.0 // indirect 70 | github.com/prometheus/common v0.32.1 // indirect 71 | github.com/prometheus/procfs v0.7.3 // indirect 72 | github.com/spf13/pflag v1.0.5 // indirect 73 | go.uber.org/atomic v1.7.0 // indirect 74 | go.uber.org/multierr v1.6.0 // indirect 75 | go.uber.org/zap v1.19.1 // indirect 76 | golang.org/x/crypto v0.17.0 // indirect 77 | golang.org/x/net v0.10.0 // indirect 78 | golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect 79 | golang.org/x/sys v0.15.0 // indirect 80 | golang.org/x/term v0.15.0 // indirect 81 | golang.org/x/text v0.14.0 // indirect 82 | gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect 83 | google.golang.org/appengine v1.6.7 // indirect 84 | google.golang.org/protobuf v1.33.0 // indirect 85 | gopkg.in/inf.v0 v0.9.1 // indirect 86 | gopkg.in/yaml.v2 v2.4.0 // indirect 87 | gopkg.in/yaml.v3 v3.0.1 // indirect 88 | k8s.io/apiextensions-apiserver v0.24.2 // indirect 89 | k8s.io/component-base v0.24.2 // indirect 90 | k8s.io/klog/v2 v2.60.1 // indirect 91 | k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 // indirect 92 | k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect 93 | sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 // indirect 94 | sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect 95 | sigs.k8s.io/yaml v1.3.0 // indirect 96 | ) 97 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "os" 7 | 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap" 9 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" 10 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 11 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" 12 | "github.com/aws/aws-sdk-go-v2/config" 13 | 14 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 15 | // to ensure that exec-entrypoint and run can make use of them. 
16 | _ "k8s.io/client-go/plugin/pkg/client/auth" 17 | 18 | "k8s.io/apimachinery/pkg/runtime" 19 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 20 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 21 | ctrl "sigs.k8s.io/controller-runtime" 22 | "sigs.k8s.io/controller-runtime/pkg/healthz" 23 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 24 | 25 | aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" 26 | multiclusterv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/multicluster/v1alpha1" 27 | multiclustercontrollers "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/controllers/multicluster" 28 | // +kubebuilder:scaffold:imports 29 | ) 30 | 31 | var ( 32 | scheme = runtime.NewScheme() 33 | log = ctrl.Log.WithName("main") 34 | ) 35 | 36 | func init() { 37 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 38 | 39 | utilruntime.Must(multiclusterv1alpha1.AddToScheme(scheme)) 40 | 41 | utilruntime.Must(aboutv1alpha1.AddToScheme(scheme)) 42 | //+kubebuilder:scaffold:scheme 43 | } 44 | 45 | func main() { 46 | var metricsAddr string 47 | var enableLeaderElection bool 48 | var probeAddr string 49 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 50 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 51 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 52 | "Enable leader election for controller manager. "+ 53 | "Enabling this will ensure there is only one active controller manager.") 54 | 55 | // Add the zap logger flag set to the CLI. The flag set must 56 | // be added before calling flag.Parse(). 57 | opts := zap.Options{} 58 | opts.BindFlags(flag.CommandLine) 59 | 60 | flag.Parse() 61 | 62 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 63 | 64 | v := version.GetVersion() 65 | log.Info("starting AWS Cloud Map MCS Controller for K8s", "version", v) 66 | 67 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 68 | Scheme: scheme, 69 | MetricsBindAddress: metricsAddr, 70 | Port: 9443, 71 | HealthProbeBindAddress: probeAddr, 72 | LeaderElection: enableLeaderElection, 73 | LeaderElectionID: "aws-cloud-map-mcs-controller-for-k8s-lock", 74 | }) 75 | if err != nil { 76 | log.Error(err, "unable to start manager") 77 | os.Exit(1) 78 | } 79 | log.Info("configuring AWS session") 80 | // GO sdk will look for region in order 1) AWS_REGION env var, 2) ~/.aws/config file, 3) EC2 IMDS 81 | awsCfg, err := config.LoadDefaultConfig(context.TODO(), config.WithEC2IMDSRegion()) 82 | 83 | if err != nil || awsCfg.Region == "" { 84 | log.Error(err, "unable to configure AWS session", "AWS_REGION", awsCfg.Region) 85 | os.Exit(1) 86 | } 87 | 88 | log.Info("Running with AWS region", "AWS_REGION", awsCfg.Region) 89 | 90 | clusterUtils := model.NewClusterUtils(mgr.GetClient()) 91 | serviceDiscoveryClient := cloudmap.NewDefaultServiceDiscoveryClient(&awsCfg, clusterUtils) 92 | 93 | if err = (&multiclustercontrollers.ServiceExportReconciler{ 94 | Client: mgr.GetClient(), 95 | Log: common.NewLogger("controllers", "ServiceExportReconciler"), 96 | Scheme: mgr.GetScheme(), 97 | CloudMap: serviceDiscoveryClient, 98 | ClusterUtils: clusterUtils, 99 | }).SetupWithManager(mgr); err != nil { 100 | log.Error(err, "unable to create controller", "controller", "ServiceExportReconciler") 101 | os.Exit(1) 102 | } 103 | 104 | cloudMapReconciler := &multiclustercontrollers.CloudMapReconciler{ 105 | Client: 
mgr.GetClient(), 106 | Cloudmap: serviceDiscoveryClient, 107 | Log: common.NewLogger("controllers", "CloudmapReconciler"), 108 | ClusterUtils: clusterUtils, 109 | } 110 | 111 | if err = mgr.Add(cloudMapReconciler); err != nil { 112 | log.Error(err, "unable to create controller", "controller", "CloudmapReconciler") 113 | os.Exit(1) 114 | } 115 | 116 | //+kubebuilder:scaffold:builder 117 | 118 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 119 | log.Error(err, "unable to set up health check") 120 | os.Exit(1) 121 | } 122 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 123 | log.Error(err, "unable to set up ready check") 124 | os.Exit(1) 125 | } 126 | 127 | log.Info("starting manager") 128 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 129 | log.Error(err, "problem running manager") 130 | os.Exit(1) 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /pkg/cloudmap/cache.go: -------------------------------------------------------------------------------- 1 | package cloudmap 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" 9 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 10 | "k8s.io/apimachinery/pkg/util/cache" 11 | ) 12 | 13 | const ( 14 | nsKey = "ns-map" 15 | svcKeyPrefix = "svc-map" 16 | 17 | defaultCacheSize = 2048 18 | defaultNsTTL = 10 * time.Second 19 | defaultSvcTTL = 10 * time.Second 20 | defaultEndptTTL = 5 * time.Second 21 | ) 22 | 23 | type ServiceDiscoveryClientCache interface { 24 | GetNamespaceMap() (namespaces map[string]*model.Namespace, found bool) 25 | CacheNamespaceMap(namespaces map[string]*model.Namespace) 26 | EvictNamespaceMap() 27 | GetServiceIdMap(namespaceName string) (serviceIdMap map[string]string, found bool) 28 | CacheServiceIdMap(namespaceName string, serviceIdMap map[string]string) 29 | EvictServiceIdMap(namespaceName string) 30 | GetEndpoints(namespaceName string, serviceName string) (endpoints []*model.Endpoint, found bool) 31 | CacheEndpoints(namespaceName string, serviceName string, endpoints []*model.Endpoint) 32 | EvictEndpoints(namespaceName string, serviceName string) 33 | } 34 | 35 | type sdCache struct { 36 | log common.Logger 37 | defaultCache *cache.LRUExpireCache 38 | endpointsCache *cache.LRUExpireCache 39 | config *SdCacheConfig 40 | } 41 | 42 | type SdCacheConfig struct { 43 | NsTTL time.Duration 44 | SvcTTL time.Duration 45 | EndptTTL time.Duration 46 | } 47 | 48 | func NewServiceDiscoveryClientCache(cacheConfig *SdCacheConfig) ServiceDiscoveryClientCache { 49 | return &sdCache{ 50 | log: common.NewLogger("cloudmap"), 51 | defaultCache: cache.NewLRUExpireCache(defaultCacheSize), 52 | endpointsCache: cache.NewLRUExpireCache(defaultCacheSize), 53 | config: cacheConfig, 54 | } 55 | } 56 | 57 | func NewDefaultServiceDiscoveryClientCache() ServiceDiscoveryClientCache { 58 | return NewServiceDiscoveryClientCache( 59 | &SdCacheConfig{ 60 | NsTTL: defaultNsTTL, 61 | SvcTTL: defaultSvcTTL, 62 | EndptTTL: defaultEndptTTL, 63 | }) 64 | } 65 | 66 | func (sdCache *sdCache) GetNamespaceMap() (namespaceMap map[string]*model.Namespace, found bool) { 67 | entry, exists := sdCache.defaultCache.Get(nsKey) 68 | if !exists { 69 | return nil, false 70 | } 71 | 72 | namespaceMap, ok := entry.(map[string]*model.Namespace) 73 | if !ok { 74 | sdCache.log.Error(errors.New("failed to retrieve namespaceMap from cache"), "") 75 | sdCache.defaultCache.Remove(nsKey) 76 | 
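// Returning found=false after evicting the corrupt entry forces the caller to repopulate the namespace map on its next lookup.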
return nil, false 77 | } 78 | 79 | return namespaceMap, true 80 | } 81 | 82 | func (sdCache *sdCache) CacheNamespaceMap(namespaces map[string]*model.Namespace) { 83 | sdCache.defaultCache.Add(nsKey, namespaces, sdCache.config.NsTTL) 84 | } 85 | 86 | func (sdCache *sdCache) EvictNamespaceMap() { 87 | sdCache.defaultCache.Remove(nsKey) 88 | } 89 | 90 | func (sdCache *sdCache) GetServiceIdMap(nsName string) (serviceIdMap map[string]string, found bool) { 91 | key := sdCache.buildSvcKey(nsName) 92 | entry, exists := sdCache.defaultCache.Get(key) 93 | if !exists { 94 | return nil, false 95 | } 96 | 97 | serviceIdMap, ok := entry.(map[string]string) 98 | if !ok { 99 | err := fmt.Errorf("failed to retrieve service IDs from cache") 100 | sdCache.log.Error(err, err.Error(), "namespace", nsName) 101 | sdCache.defaultCache.Remove(key) 102 | return nil, false 103 | } 104 | 105 | return serviceIdMap, true 106 | } 107 | 108 | func (sdCache *sdCache) CacheServiceIdMap(nsName string, serviceIdMap map[string]string) { 109 | key := sdCache.buildSvcKey(nsName) 110 | sdCache.defaultCache.Add(key, serviceIdMap, sdCache.config.SvcTTL) 111 | } 112 | 113 | func (sdCache *sdCache) EvictServiceIdMap(nsName string) { 114 | key := sdCache.buildSvcKey(nsName) 115 | sdCache.defaultCache.Remove(key) 116 | } 117 | 118 | func (sdCache *sdCache) GetEndpoints(nsName string, svcName string) (endpts []*model.Endpoint, found bool) { 119 | key := sdCache.buildEndptsKey(nsName, svcName) 120 | entry, exists := sdCache.endpointsCache.Get(key) 121 | if !exists { 122 | return nil, false 123 | } 124 | 125 | endpts, ok := entry.([]*model.Endpoint) 126 | if !ok { 127 | err := fmt.Errorf("failed to retrieve endpoints from cache") 128 | sdCache.log.Error(err, err.Error(), "namespace", nsName, "service", svcName) 129 | sdCache.endpointsCache.Remove(key) 130 | return nil, false 131 | } 132 | 133 | return endpts, true 134 | } 135 | 136 | func (sdCache *sdCache) CacheEndpoints(nsName string, svcName string, endpts []*model.Endpoint) { 137 | key := sdCache.buildEndptsKey(nsName, svcName) 138 | sdCache.endpointsCache.Add(key, endpts, sdCache.config.EndptTTL) 139 | } 140 | 141 | func (sdCache *sdCache) EvictEndpoints(nsName string, svcName string) { 142 | key := sdCache.buildEndptsKey(nsName, svcName) 143 | sdCache.endpointsCache.Remove(key) 144 | } 145 | 146 | func (sdCache *sdCache) buildSvcKey(nsName string) (cacheKey string) { 147 | return fmt.Sprintf("%s:%s", svcKeyPrefix, nsName) 148 | } 149 | 150 | func (sdCache *sdCache) buildEndptsKey(nsName string, svcName string) string { 151 | return fmt.Sprintf("%s:%s", nsName, svcName) 152 | } 153 | -------------------------------------------------------------------------------- /pkg/cloudmap/cache_test.go: -------------------------------------------------------------------------------- 1 | package cloudmap 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/common" 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 9 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/test" 10 | "github.com/go-logr/logr/testr" 11 | "github.com/stretchr/testify/assert" 12 | "k8s.io/apimachinery/pkg/util/cache" 13 | ) 14 | 15 | func TestNewServiceDiscoveryClientCache(t *testing.T) { 16 | sdc, ok := NewServiceDiscoveryClientCache(&SdCacheConfig{ 17 | NsTTL: 3 * time.Second, 18 | SvcTTL: 3 * time.Second, 19 | EndptTTL: 3 * time.Second, 20 | }).(*sdCache) 21 | if !ok { 22 | t.Fatalf("failed to create cache") 23 | } 24 | 25 | assert.Equal(t, 
3*time.Second, sdc.config.NsTTL) 26 | assert.Equal(t, 3*time.Second, sdc.config.SvcTTL) 27 | assert.Equal(t, 3*time.Second, sdc.config.EndptTTL) 28 | } 29 | 30 | func TestNewDefaultServiceDiscoveryClientCache(t *testing.T) { 31 | sdc, ok := NewDefaultServiceDiscoveryClientCache().(*sdCache) 32 | if !ok { 33 | t.Fatalf("failed to create cache") 34 | } 35 | 36 | assert.Equal(t, defaultNsTTL, sdc.config.NsTTL) 37 | assert.Equal(t, defaultSvcTTL, sdc.config.SvcTTL) 38 | assert.Equal(t, defaultEndptTTL, sdc.config.EndptTTL) 39 | } 40 | 41 | func TestServiceDiscoveryClientCacheGetNamespaceMap_Found(t *testing.T) { 42 | sdc := NewDefaultServiceDiscoveryClientCache() 43 | sdc.CacheNamespaceMap(map[string]*model.Namespace{ 44 | test.HttpNsName: test.GetTestHttpNamespace(), 45 | }) 46 | 47 | nsMap, found := sdc.GetNamespaceMap() 48 | assert.True(t, found) 49 | assert.Equal(t, test.GetTestHttpNamespace(), nsMap[test.HttpNsName]) 50 | } 51 | 52 | func TestServiceDiscoveryClientCacheGetNamespaceMap_NotFound(t *testing.T) { 53 | sdc := NewDefaultServiceDiscoveryClientCache() 54 | 55 | nsMap, found := sdc.GetNamespaceMap() 56 | assert.False(t, found) 57 | assert.Nil(t, nsMap) 58 | } 59 | 60 | func TestServiceDiscoveryClientCacheGetNamespaceMap_Corrupt(t *testing.T) { 61 | sdc := getCacheImpl(t) 62 | sdc.defaultCache.Add(nsKey, &model.Plan{}, time.Minute) 63 | 64 | nsMap, found := sdc.GetNamespaceMap() 65 | assert.False(t, found) 66 | assert.Nil(t, nsMap) 67 | } 68 | 69 | func TestServiceDiscoveryClientEvictNamespaceMap(t *testing.T) { 70 | sdc := NewDefaultServiceDiscoveryClientCache() 71 | sdc.CacheNamespaceMap(map[string]*model.Namespace{ 72 | test.HttpNsName: test.GetTestHttpNamespace(), 73 | }) 74 | sdc.EvictNamespaceMap() 75 | 76 | nsMap, found := sdc.GetNamespaceMap() 77 | assert.False(t, found) 78 | assert.Nil(t, nsMap) 79 | } 80 | 81 | func TestServiceDiscoveryClientCacheGetServiceIdMap_Found(t *testing.T) { 82 | sdc := NewDefaultServiceDiscoveryClientCache() 83 | sdc.CacheServiceIdMap(test.HttpNsName, map[string]string{ 84 | test.SvcName: test.SvcId, 85 | }) 86 | 87 | svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) 88 | assert.True(t, found) 89 | assert.Equal(t, test.SvcId, svcIdMap[test.SvcName]) 90 | } 91 | 92 | func TestServiceDiscoveryClientCacheGetServiceIdMap_NotFound(t *testing.T) { 93 | sdc := NewDefaultServiceDiscoveryClientCache() 94 | 95 | svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) 96 | assert.False(t, found) 97 | assert.Empty(t, svcIdMap) 98 | } 99 | 100 | func TestServiceDiscoveryClientCacheGetServiceIdMap_Corrupt(t *testing.T) { 101 | sdc := getCacheImpl(t) 102 | sdc.defaultCache.Add(sdc.buildSvcKey(test.HttpNsName), &model.Plan{}, time.Minute) 103 | 104 | svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) 105 | assert.False(t, found) 106 | assert.Empty(t, svcIdMap) 107 | } 108 | 109 | func TestServiceDiscoveryClientEvictServiceIdMap(t *testing.T) { 110 | sdc := NewDefaultServiceDiscoveryClientCache() 111 | sdc.CacheServiceIdMap(test.HttpNsName, map[string]string{ 112 | test.SvcName: test.SvcId, 113 | }) 114 | sdc.EvictServiceIdMap(test.HttpNsName) 115 | 116 | svcIdMap, found := sdc.GetServiceIdMap(test.HttpNsName) 117 | assert.False(t, found) 118 | assert.Empty(t, svcIdMap) 119 | } 120 | 121 | func TestServiceDiscoveryClientCacheGetEndpoints_Found(t *testing.T) { 122 | sdc := NewDefaultServiceDiscoveryClientCache() 123 | sdc.CacheEndpoints(test.HttpNsName, test.SvcName, []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) 124 | 125 | 
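// The lookup below should hit the dedicated endpoints cache, which retains entries for the endpoint TTL (5s by default).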
endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) 126 | assert.True(t, found) 127 | assert.Equal(t, []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}, endpts) 128 | } 129 | 130 | func TestServiceDiscoveryClientCacheGetEndpoints_NotFound(t *testing.T) { 131 | sdc := NewDefaultServiceDiscoveryClientCache() 132 | 133 | endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) 134 | assert.False(t, found) 135 | assert.Nil(t, endpts) 136 | } 137 | 138 | func TestServiceDiscoveryClientCacheGetEndpoints_Corrupt(t *testing.T) { 139 | sdc := getCacheImpl(t) 140 | sdc.endpointsCache.Add(sdc.buildEndptsKey(test.HttpNsName, test.SvcName), &model.Plan{}, time.Minute) 141 | 142 | endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) 143 | assert.False(t, found) 144 | assert.Nil(t, endpts) 145 | } 146 | 147 | func TestServiceDiscoveryClientEvictEndpoints(t *testing.T) { 148 | sdc := NewDefaultServiceDiscoveryClientCache() 149 | sdc.CacheEndpoints(test.HttpNsName, test.SvcName, []*model.Endpoint{test.GetTestEndpoint1(), test.GetTestEndpoint2()}) 150 | sdc.EvictEndpoints(test.HttpNsName, test.SvcName) 151 | 152 | endpts, found := sdc.GetEndpoints(test.HttpNsName, test.SvcName) 153 | assert.False(t, found) 154 | assert.Nil(t, endpts) 155 | } 156 | 157 | func getCacheImpl(t *testing.T) sdCache { 158 | return sdCache{ 159 | log: common.NewLoggerWithLogr(testr.New(t)), 160 | defaultCache: cache.NewLRUExpireCache(defaultCacheSize), 161 | endpointsCache: cache.NewLRUExpireCache(defaultCacheSize), 162 | } 163 | } 164 | -------------------------------------------------------------------------------- /docs/images/cloudmap.svg: -------------------------------------------------------------------------------- 1 | [SVG image: AWS Cloud Map architecture icon ("Icon-Architecture/64/Arch_AWS-CloudMap_64", created with Sketch); vector data omitted] -------------------------------------------------------------------------------- /config/crd/bases/multicluster.x-k8s.io_serviceexports.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.9.2 7 | creationTimestamp: null 8 | name: serviceexports.multicluster.x-k8s.io 9 | spec: 10 | group: multicluster.x-k8s.io 11 | names: 12 | kind: ServiceExport 13 | listKind: ServiceExportList 14 | plural: serviceexports 15 | singular: serviceexport 16 | scope: Namespaced 17 | versions: 18 | - name: v1alpha1 19 | schema: 20 | openAPIV3Schema: 21 | description: ServiceExport declares that the Service with the same name and 22 | namespace as this export should be consumable from other clusters. 23 | properties: 24 | apiVersion: 25 | description: 'APIVersion defines the versioned schema of this representation 26 | of an object. Servers should convert recognized schemas to the latest 27 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 28 | type: string 29 | kind: 30 | description: 'Kind is a string value representing the REST resource this 31 | object represents. Servers may infer this from the endpoint the client 32 | submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 33 | type: string 34 | metadata: 35 | type: object 36 | status: 37 | description: status describes the current state of an exported service. 38 | Service configuration comes from the Service that had the same name 39 | and namespace as this ServiceExport. Populated by the multi-cluster 40 | service implementation's controller. 41 | properties: 42 | conditions: 43 | items: 44 | description: "Condition contains details for one aspect of the current 45 | state of this API Resource. --- This struct is intended for direct 46 | use as an array at the field path .status.conditions. For example, 47 | type FooStatus struct{ // Represents the observations of a foo's 48 | current state. // Known .status.conditions.type are: \"Available\", 49 | \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge 50 | // +listType=map // +listMapKey=type Conditions []metav1.Condition 51 | `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" 52 | protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" 53 | properties: 54 | lastTransitionTime: 55 | description: lastTransitionTime is the last time the condition 56 | transitioned from one status to another. This should be when 57 | the underlying condition changed. If that is not known, then 58 | using the time when the API field changed is acceptable. 59 | format: date-time 60 | type: string 61 | message: 62 | description: message is a human readable message indicating 63 | details about the transition. This may be an empty string. 64 | maxLength: 32768 65 | type: string 66 | observedGeneration: 67 | description: observedGeneration represents the .metadata.generation 68 | that the condition was set based upon. For instance, if .metadata.generation 69 | is currently 12, but the .status.conditions[x].observedGeneration 70 | is 9, the condition is out of date with respect to the current 71 | state of the instance. 72 | format: int64 73 | minimum: 0 74 | type: integer 75 | reason: 76 | description: reason contains a programmatic identifier indicating 77 | the reason for the condition's last transition. Producers 78 | of specific condition types may define expected values and 79 | meanings for this field, and whether the values are considered 80 | a guaranteed API. The value should be a CamelCase string. 81 | This field may not be empty. 82 | maxLength: 1024 83 | minLength: 1 84 | pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ 85 | type: string 86 | status: 87 | description: status of the condition, one of True, False, Unknown. 88 | enum: 89 | - "True" 90 | - "False" 91 | - Unknown 92 | type: string 93 | type: 94 | description: type of condition in CamelCase or in foo.example.com/CamelCase. 95 | --- Many .condition.type values are consistent across resources 96 | like Available, but because arbitrary conditions can be useful 97 | (see .node.status.conditions), the ability to deconflict is 98 | important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) 99 | maxLength: 316 100 | pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ 101 | type: string 102 | required: 103 | - lastTransitionTime 104 | - message 105 | - reason 106 | - status 107 | - type 108 | type: object 109 | type: array 110 | x-kubernetes-list-map-keys: 111 | - type 112 | x-kubernetes-list-type: map 113 | type: object 114 | type: object 115 | served: true 116 | storage: true 117 | -------------------------------------------------------------------------------- /pkg/cloudmap/operation_poller_test.go: -------------------------------------------------------------------------------- 1 | package cloudmap 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | cloudmapMock "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/mocks/pkg/cloudmap" 10 | "github.com/aws/aws-sdk-go-v2/aws" 11 | "github.com/aws/aws-sdk-go-v2/service/servicediscovery/types" 12 | "github.com/golang/mock/gomock" 13 | "github.com/stretchr/testify/assert" 14 | ) 15 | 16 | const ( 17 | op1 = "one" 18 | op2 = "two" 19 | op3 = "three" 20 | interval = 100 * time.Millisecond 21 | timeout = 500 * time.Millisecond 22 | ) 23 | 24 | func TestOperationPoller_HappyCase(t *testing.T) { 25 | mockController := gomock.NewController(t) 26 | defer mockController.Finish() 27 | 28 | sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) 29 | 30 | op1First := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSubmitted(), nil) 31 | op1Second := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil) 32 | op1Third := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSuccess(), nil) 33 | gomock.InOrder(op1First, op1Second, op1Third) 34 | 35 | op2First := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opPending(), nil) 36 | op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSuccess(), nil) 37 | gomock.InOrder(op2First, op2Second) 38 | 39 | sdApi.EXPECT().GetOperation(gomock.Any(), op3).Return(opSuccess(), nil) 40 | 41 | op := NewOperationPollerWithConfig(interval, timeout, sdApi) 42 | op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) 43 | op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) 44 | op.Submit(context.TODO(), func() (opId string, err error) { return op3, nil }) 45 | 46 | result := op.Await() 47 | assert.Nil(t, result) 48 | } 49 | 50 | func TestOperationPoller_AllFail(t *testing.T) { 51 | mockController := gomock.NewController(t) 52 | defer mockController.Finish() 53 | 54 | sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) 55 | 56 | op1First := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSubmitted(), nil) 57 | op1Second := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil) 58 | op1Third := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opFailed(), nil) 59 | gomock.InOrder(op1First, op1Second, op1Third) 60 | 61 | op2First := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSubmitted(), nil) 62 | op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opFailed(), nil) 63 | gomock.InOrder(op2First, op2Second) 64 | 65 | op := NewOperationPollerWithConfig(interval, timeout, sdApi) 66 | op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) 67 | op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) 68 | unknown := "failed to reg error" 69 | 
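// A Submit callback that fails outright (returning an error instead of an operation ID) should have its error folded into the combined Await error alongside the per-operation failures.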
op.Submit(context.TODO(), func() (opId string, err error) { 70 | return "", fmt.Errorf(unknown) 71 | }) 72 | 73 | err := op.Await() 74 | assert.NotNil(t, err) 75 | assert.Contains(t, err.Error(), op1) 76 | assert.Contains(t, err.Error(), op2) 77 | assert.Contains(t, err.Error(), unknown) 78 | } 79 | 80 | func TestOperationPoller_Mixed(t *testing.T) { 81 | mockController := gomock.NewController(t) 82 | defer mockController.Finish() 83 | 84 | sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) 85 | 86 | op1First := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opSubmitted(), nil) 87 | op1Second := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil) 88 | op1Third := sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opFailed(), nil) 89 | gomock.InOrder(op1First, op1Second, op1Third) 90 | 91 | op2First := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSubmitted(), nil) 92 | op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opPending(), nil) 93 | op2Third := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSuccess(), nil) 94 | gomock.InOrder(op2First, op2Second, op2Third) 95 | 96 | op := NewOperationPollerWithConfig(interval, timeout, sdApi) 97 | op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) 98 | op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) 99 | 100 | err := op.Await() 101 | assert.NotNil(t, err) 102 | assert.Contains(t, err.Error(), op1) 103 | assert.NotContains(t, err.Error(), op2) 104 | } 105 | 106 | func TestOperationPoller_Timeout(t *testing.T) { 107 | mockController := gomock.NewController(t) 108 | defer mockController.Finish() 109 | 110 | sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) 111 | 112 | sdApi.EXPECT().GetOperation(gomock.Any(), op1).Return(opPending(), nil).AnyTimes() 113 | 114 | op2First := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opPending(), nil) 115 | op2Second := sdApi.EXPECT().GetOperation(gomock.Any(), op2).Return(opSuccess(), nil) 116 | gomock.InOrder(op2First, op2Second) 117 | 118 | op := NewOperationPollerWithConfig(interval, timeout, sdApi) 119 | op.Submit(context.TODO(), func() (opId string, err error) { return op1, nil }) 120 | op.Submit(context.TODO(), func() (opId string, err error) { return op2, nil }) 121 | 122 | err := op.Await() 123 | assert.NotNil(t, err) 124 | assert.Contains(t, err.Error(), op1) 125 | assert.Contains(t, err.Error(), operationPollTimoutErrorMessage) 126 | assert.NotContains(t, err.Error(), op2) 127 | } 128 | 129 | func TestOperationPoller_Poll_HappyCase(t *testing.T) { 130 | mockController := gomock.NewController(t) 131 | defer mockController.Finish() 132 | 133 | sdApi := cloudmapMock.NewMockServiceDiscoveryApi(mockController) 134 | 135 | sdApi.EXPECT().GetOperation(context.TODO(), op1).Return(opPending(), nil) 136 | sdApi.EXPECT().GetOperation(context.TODO(), op1).Return(opSuccess(), nil) 137 | 138 | op := NewOperationPollerWithConfig(interval, timeout, sdApi) 139 | _, err := op.Poll(context.TODO(), op1) 140 | assert.Nil(t, err) 141 | } 142 | 143 | func opPending() *types.Operation { 144 | return &types.Operation{ 145 | Status: types.OperationStatusPending, 146 | } 147 | } 148 | 149 | func opFailed() *types.Operation { 150 | return &types.Operation{ 151 | Status: types.OperationStatusFail, 152 | ErrorMessage: aws.String("fail"), 153 | } 154 | } 155 | 156 | func opSubmitted() *types.Operation { 157 | return &types.Operation{ 158 | Status: 
types.OperationStatusSubmitted, 159 | } 160 | } 161 | 162 | func opSuccess() *types.Operation { 163 | return &types.Operation{ 164 | Status: types.OperationStatusSuccess, 165 | } 166 | } 167 | -------------------------------------------------------------------------------- /config/crd/bases/multicluster.x-k8s.io_serviceimports.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.9.2 7 | creationTimestamp: null 8 | name: serviceimports.multicluster.x-k8s.io 9 | spec: 10 | group: multicluster.x-k8s.io 11 | names: 12 | kind: ServiceImport 13 | listKind: ServiceImportList 14 | plural: serviceimports 15 | singular: serviceimport 16 | scope: Namespaced 17 | versions: 18 | - name: v1alpha1 19 | schema: 20 | openAPIV3Schema: 21 | description: ServiceImport describes a service imported from clusters in a 22 | ClusterSet. 23 | properties: 24 | apiVersion: 25 | description: 'APIVersion defines the versioned schema of this representation 26 | of an object. Servers should convert recognized schemas to the latest 27 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 28 | type: string 29 | kind: 30 | description: 'Kind is a string value representing the REST resource this 31 | object represents. Servers may infer this from the endpoint the client 32 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 33 | type: string 34 | metadata: 35 | type: object 36 | spec: 37 | description: spec defines the behavior of a ServiceImport. 38 | properties: 39 | ips: 40 | description: ip will be used as the VIP for this service when type 41 | is ClusterSetIP. 42 | items: 43 | type: string 44 | type: array 45 | ports: 46 | items: 47 | description: ServicePort represents the port on which the service 48 | is exposed 49 | properties: 50 | appProtocol: 51 | description: The application protocol for this port. This field 52 | follows standard Kubernetes label syntax. Un-prefixed names 53 | are reserved for IANA standard service names (as per RFC-6335 54 | and http://www.iana.org/assignments/service-names). Non-standard 55 | protocols should use prefixed names such as mycompany.com/my-custom-protocol. 56 | Field can be enabled with ServiceAppProtocol feature gate. 57 | type: string 58 | name: 59 | description: The name of this port within the service. This 60 | must be a DNS_LABEL. All ports within a ServiceSpec must have 61 | unique names. When considering the endpoints for a Service, 62 | this must match the 'name' field in the EndpointPort. Optional 63 | if only one ServicePort is defined on this service. 64 | type: string 65 | port: 66 | description: The port that will be exposed by this service. 67 | format: int32 68 | type: integer 69 | protocol: 70 | default: TCP 71 | description: The IP protocol for this port. Supports "TCP", 72 | "UDP", and "SCTP". Default is TCP. 73 | type: string 74 | required: 75 | - port 76 | type: object 77 | type: array 78 | x-kubernetes-list-type: atomic 79 | sessionAffinity: 80 | description: 'Supports "ClientIP" and "None". Used to maintain session 81 | affinity. Enable client IP based session affinity. Must be ClientIP 82 | or None. Defaults to None. 
Ignored when type is Headless More info: 83 | https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies' 84 | type: string 85 | sessionAffinityConfig: 86 | description: sessionAffinityConfig contains session affinity configuration. 87 | properties: 88 | clientIP: 89 | description: clientIP contains the configurations of Client IP 90 | based session affinity. 91 | properties: 92 | timeoutSeconds: 93 | description: timeoutSeconds specifies the seconds of ClientIP 94 | type session sticky time. The value must be >0 && <=86400(for 95 | 1 day) if ServiceAffinity == "ClientIP". Default value is 96 | 10800(for 3 hours). 97 | format: int32 98 | type: integer 99 | type: object 100 | type: object 101 | type: 102 | description: type defines the type of this service. Must be ClusterSetIP 103 | or Headless. 104 | enum: 105 | - ClusterSetIP 106 | - Headless 107 | type: string 108 | required: 109 | - ports 110 | - type 111 | type: object 112 | status: 113 | description: status contains information about the exported services that 114 | form the multi-cluster service referenced by this ServiceImport. 115 | properties: 116 | clusters: 117 | description: clusters is the list of exporting clusters from which 118 | this service was derived. 119 | items: 120 | description: ClusterStatus contains service configuration mapped 121 | to a specific source cluster 122 | properties: 123 | cluster: 124 | description: cluster is the name of the exporting cluster. Must 125 | be a valid RFC-1123 DNS label. 126 | type: string 127 | required: 128 | - cluster 129 | type: object 130 | type: array 131 | x-kubernetes-list-map-keys: 132 | - cluster 133 | x-kubernetes-list-type: map 134 | type: object 135 | type: object 136 | served: true 137 | storage: true 138 | -------------------------------------------------------------------------------- /pkg/controllers/multicluster/endpointslice_plan.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 5 | v1 "k8s.io/api/core/v1" 6 | discovery "k8s.io/api/discovery/v1" 7 | ) 8 | 9 | const defaultMaxEndpointsPerSlice = 100 10 | 11 | type EndpointSliceChanges struct { 12 | // Create: List of EndpointSlices that need to be created 13 | Create []*discovery.EndpointSlice 14 | // Update: List of EndpointSlices that need to be updated 15 | Update []*discovery.EndpointSlice 16 | // Delete: List of EndpointSlices that need to be deleted 17 | Delete []*discovery.EndpointSlice 18 | // Unmodified: List of EndpointSlices that do not need to be changed 19 | Unmodified []*discovery.EndpointSlice 20 | } 21 | 22 | type EndpointSlicePlan struct { 23 | // maxEndpointsPerSlice defaults to 100 24 | maxEndpointsPerSlice int 25 | 26 | // Service to reconcile endpoints in 27 | Service *v1.Service 28 | 29 | // ServiceImportName is the name used to create new EndpointSlices 30 | ServiceImportName string 31 | 32 | // Current EndpointSlices 33 | Current []*discovery.EndpointSlice 34 | 35 | // Desired Endpoints 36 | Desired []*model.Endpoint 37 | 38 | // Cluster the EndpointSlice belongs to 39 | ClusterId string 40 | } 41 | 42 | // CheckAddressType TODO: Will need to improve how IP Type is determined when we implement dual stack. 43 | func (p *EndpointSlicePlan) CheckAddressType() discovery.AddressType { 44 | // Peek at the first endpoint for its AddressType. All endpoints in a slice will be of the same AddressType.
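// With no desired endpoints to inspect, default to IPv4, matching the controller's current single-stack behavior.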
45 | if len(p.Desired) == 0 { 46 | return discovery.AddressTypeIPv4 47 | } 48 | return p.Desired[0].AddressType 49 | } 50 | 51 | // CalculateChanges returns the list of EndpointSlice changes that need to be applied 52 | func (p *EndpointSlicePlan) CalculateChanges() EndpointSliceChanges { 53 | // populate map of desired endpoints for lookup efficiency 54 | desiredEndpoints := make(map[string]*model.Endpoint) 55 | for _, desiredEndpoint := range p.Desired { 56 | desiredEndpoints[desiredEndpoint.IP] = desiredEndpoint 57 | } 58 | 59 | desiredPorts := ExtractEndpointPorts(p.Desired) 60 | 61 | // Remove unwanted endpoints from slices 62 | changes := p.trimSlices(desiredEndpoints, desiredPorts) 63 | 64 | // Add new endpoints to slices 65 | for len(desiredEndpoints) > 0 { 66 | sliceWithRoom, needsPortUpdate := p.getOrCreateUnfilledEndpointSlice(&changes, len(desiredEndpoints)) 67 | 68 | for key, endpointToAdd := range desiredEndpoints { 69 | roomInSlice := p.getMaxEndpointsPerSlice() - len(sliceWithRoom.Endpoints) 70 | if roomInSlice <= 0 { 71 | // stop adding to slice once it is full 72 | break 73 | } 74 | sliceWithRoom.Endpoints = append(sliceWithRoom.Endpoints, CreateEndpointForSlice(p.Service, endpointToAdd)) 75 | delete(desiredEndpoints, key) 76 | } 77 | 78 | if needsPortUpdate { 79 | newPorts := portSliceToEndpointPortSlice(desiredPorts) 80 | sliceWithRoom.Ports = newPorts 81 | } 82 | } 83 | 84 | return changes 85 | } 86 | 87 | func (p *EndpointSlicePlan) trimSlices(desiredEndpoints map[string]*model.Endpoint, desiredPorts []*model.Port) (changes EndpointSliceChanges) { 88 | // remove all undesired existing endpoints in slices 89 | for _, existingSlice := range p.Current { 90 | updatedEndpointList := make([]discovery.Endpoint, 0) 91 | for _, existingEndpoint := range existingSlice.Endpoints { 92 | key := existingEndpoint.Addresses[0] 93 | if _, found := desiredEndpoints[key]; found { 94 | updatedEndpointList = append(updatedEndpointList, existingEndpoint) 95 | delete(desiredEndpoints, key) 96 | } 97 | } 98 | 99 | // mark slice for deletion if all endpoints were removed 100 | if len(updatedEndpointList) == 0 { 101 | changes.Delete = append(changes.Delete, existingSlice) 102 | continue 103 | } 104 | 105 | sliceNeedsUpdate := false 106 | 107 | // slice needs to be updated if ports do not match 108 | if !PortsEqualIgnoreOrder(desiredPorts, endpointPortSliceToPortSlice(existingSlice.Ports)) { 109 | existingSlice.Ports = portSliceToEndpointPortSlice(desiredPorts) 110 | sliceNeedsUpdate = true 111 | } 112 | 113 | // slice needs to be updated if endpoint list changed 114 | if len(updatedEndpointList) != len(existingSlice.Endpoints) { 115 | existingSlice.Endpoints = updatedEndpointList 116 | sliceNeedsUpdate = true 117 | } 118 | 119 | if sliceNeedsUpdate { 120 | changes.Update = append(changes.Update, existingSlice) 121 | } else { 122 | changes.Unmodified = append(changes.Unmodified, existingSlice) 123 | } 124 | } 125 | 126 | return changes 127 | } 128 | 129 | func (p *EndpointSlicePlan) getOrCreateUnfilledEndpointSlice(changes *EndpointSliceChanges, requiredCapacity int) (sliceWithRoom *discovery.EndpointSlice, needsPortUpdate bool) { 130 | // Prefer slices we are already updating 131 | for _, sliceToUpdate := range changes.Update { 132 | if len(sliceToUpdate.Endpoints) < p.getMaxEndpointsPerSlice() { 133 | return sliceToUpdate, false 134 | } 135 | } 136 | 137 | // Update a slice marked for deletion if possible 138 | if len(changes.Delete) > 0 { 139 | sliceToReuse := changes.Delete[0] 140 | changes.Delete
= changes.Delete[1:] 141 | changes.Update = append(changes.Update, sliceToReuse) 142 | 143 | // clear endpoint list that was marked for deletion before reusing 144 | sliceToReuse.Endpoints = []discovery.Endpoint{} 145 | return sliceToReuse, true 146 | } 147 | 148 | // Update an unmodified slice if it has capacity to add all endpoints 149 | for i, unmodifiedSlice := range changes.Unmodified { 150 | proposedSliceLength := len(unmodifiedSlice.Endpoints) + requiredCapacity 151 | if proposedSliceLength <= p.getMaxEndpointsPerSlice() { 152 | changes.Unmodified = append(changes.Unmodified[:i], changes.Unmodified[i+1:]...) 153 | changes.Update = append(changes.Update, unmodifiedSlice) 154 | return unmodifiedSlice, false 155 | } 156 | } 157 | 158 | // No existing slices can fill new endpoint requirements so create a new slice 159 | sliceToCreate := CreateEndpointSliceStruct(p.Service, p.ServiceImportName, p.ClusterId, p.CheckAddressType()) 160 | changes.Create = append(changes.Create, sliceToCreate) 161 | return sliceToCreate, true 162 | } 163 | 164 | func (p *EndpointSlicePlan) getMaxEndpointsPerSlice() int { 165 | if p.maxEndpointsPerSlice != 0 { 166 | return p.maxEndpointsPerSlice 167 | } 168 | 169 | return defaultMaxEndpointsPerSlice 170 | } 171 | 172 | func endpointPortSliceToPortSlice(endpointPorts []discovery.EndpointPort) (ports []*model.Port) { 173 | for _, endpointPort := range endpointPorts { 174 | port := EndpointPortToPort(endpointPort) 175 | ports = append(ports, &port) 176 | } 177 | return ports 178 | } 179 | 180 | func portSliceToEndpointPortSlice(ports []*model.Port) (endpointPorts []discovery.EndpointPort) { 181 | for _, port := range ports { 182 | endpointPort := PortToEndpointPort(*port) 183 | endpointPorts = append(endpointPorts, endpointPort) 184 | } 185 | return endpointPorts 186 | } 187 | -------------------------------------------------------------------------------- /test/test-constants.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "fmt" 5 | 6 | discovery "k8s.io/api/discovery/v1" 7 | 8 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/version" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | aboutv1alpha1 "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/apis/about/v1alpha1" 12 | "github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model" 13 | ) 14 | 15 | const ( 16 | HttpNsName = "http-ns-name" 17 | DnsNsName = "dns-ns-name" 18 | HttpNsId = "http-ns-id" 19 | DnsNsId = "dns-ns-id" 20 | SvcName = "svc-name" 21 | SvcId = "svc-id" 22 | ClusterId1 = "test-mcs-clusterid-1" 23 | ClusterSet = "test-mcs-clustersetid" 24 | ClusterId2 = "test-mcs-clusterid-2" 25 | EndptId1 = "tcp-192_168_0_1-1" 26 | EndptId2 = "tcp-192_168_0_2-2" 27 | EndptIdIpv6 = "tcp-2001_0db8_0001_0000_0000_0ab9_C0A8:0102-1" 28 | EndptIp1 = "192.168.0.1" 29 | EndptIp2 = "192.168.0.2" 30 | EndptIpv6 = "2001:0db8:0001:0000:0000:0ab9:C0A8:0102" 31 | EndptReadyTrue = "true" 32 | EndptReadyFalse = "false" 33 | Port1 = 1 34 | PortStr1 = "1" 35 | PortName1 = "http" 36 | Protocol1 = "TCP" 37 | ServicePort1 = 11 38 | ServicePortStr1 = "11" 39 | Port2 = 2 40 | PortStr2 = "2" 41 | PortName2 = "https" 42 | Protocol2 = "UDP" 43 | ServicePort2 = 22 44 | ServicePortStr2 = "22" 45 | ClusterIp1 = "10.10.10.1" 46 | ClusterIp2 = "10.10.10.2" 47 | OpId1 = "operation-id-1" 48 | OpId2 = "operation-id-2" 49 | OpStart = 1 50 | SvcType = "ClusterSetIP" 51 | SvcExportCreationTimestamp int64 = 1640995200000 52 | Hostname = "host" 
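// Hostname and Nodename populate the hostname and nodeName fields on every test endpoint fixture below.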
53 | Nodename = "node" 54 | PackageVersion = "aws-cloud-map-mcs-controller-for-k8s 0.0.1 (abcd)" 55 | ) 56 | 57 | func SetTestVersion() { 58 | version.GitVersion = "v0.0.1" 59 | version.GitCommit = "abcd" 60 | } 61 | 62 | func GetTestHttpNamespace() *model.Namespace { 63 | return &model.Namespace{ 64 | Id: HttpNsId, 65 | Name: HttpNsName, 66 | Type: model.HttpNamespaceType, 67 | } 68 | } 69 | 70 | func GetTestDnsNamespace() *model.Namespace { 71 | return &model.Namespace{ 72 | Id: DnsNsId, 73 | Name: DnsNsName, 74 | Type: model.DnsPrivateNamespaceType, 75 | } 76 | } 77 | 78 | func GetTestService() *model.Service { 79 | return &model.Service{ 80 | Namespace: HttpNsName, 81 | Name: SvcName, 82 | Endpoints: []*model.Endpoint{GetTestEndpoint1(), GetTestEndpoint2()}, 83 | } 84 | } 85 | 86 | func GetTestServiceWithEndpoint(endpoints []*model.Endpoint) *model.Service { 87 | return &model.Service{ 88 | Namespace: HttpNsName, 89 | Name: SvcName, 90 | Endpoints: endpoints, 91 | } 92 | } 93 | 94 | func GetTestMulticlusterService() *model.Service { 95 | // Service has two endpoints belonging to two different clusters in the same clusterset 96 | return &model.Service{ 97 | Namespace: HttpNsName, 98 | Name: SvcName, 99 | Endpoints: GetMulticlusterTestEndpoints(), 100 | } 101 | } 102 | 103 | func GetTestEndpoint1() *model.Endpoint { 104 | return &model.Endpoint{ 105 | Id: EndptId1, 106 | IP: EndptIp1, 107 | AddressType: discovery.AddressTypeIPv4, 108 | EndpointPort: model.Port{ 109 | Name: PortName1, 110 | Port: Port1, 111 | Protocol: Protocol1, 112 | }, 113 | ServicePort: model.Port{ 114 | Name: PortName1, 115 | Port: ServicePort1, 116 | TargetPort: PortStr1, 117 | Protocol: Protocol1, 118 | }, 119 | Ready: true, 120 | Hostname: Hostname, 121 | Nodename: Nodename, 122 | ClusterId: ClusterId1, 123 | ClusterSetId: ClusterSet, 124 | ServiceType: model.ClusterSetIPType, 125 | ServiceExportCreationTimestamp: SvcExportCreationTimestamp, 126 | Attributes: map[string]string{model.K8sVersionAttr: PackageVersion}, 127 | } 128 | } 129 | 130 | func GetTestEndpoint2() *model.Endpoint { 131 | return &model.Endpoint{ 132 | Id: EndptId2, 133 | IP: EndptIp2, 134 | AddressType: discovery.AddressTypeIPv4, 135 | EndpointPort: model.Port{ 136 | Name: PortName2, 137 | Port: Port2, 138 | Protocol: Protocol2, 139 | }, 140 | ServicePort: model.Port{ 141 | Name: PortName2, 142 | Port: ServicePort2, 143 | TargetPort: PortStr2, 144 | Protocol: Protocol2, 145 | }, 146 | Ready: true, 147 | Hostname: Hostname, 148 | Nodename: Nodename, 149 | ClusterId: ClusterId1, 150 | ClusterSetId: ClusterSet, 151 | ServiceType: model.ClusterSetIPType, 152 | ServiceExportCreationTimestamp: SvcExportCreationTimestamp, 153 | Attributes: map[string]string{model.K8sVersionAttr: PackageVersion}, 154 | } 155 | } 156 | 157 | func GetTestEndpointIpv6() *model.Endpoint { 158 | return &model.Endpoint{ 159 | Id: EndptId2, 160 | IP: EndptIpv6, 161 | AddressType: discovery.AddressTypeIPv6, 162 | EndpointPort: model.Port{ 163 | Name: PortName2, 164 | Port: Port2, 165 | Protocol: Protocol2, 166 | }, 167 | ServicePort: model.Port{ 168 | Name: PortName2, 169 | Port: ServicePort2, 170 | TargetPort: PortStr2, 171 | Protocol: Protocol2, 172 | }, 173 | Ready: true, 174 | Hostname: Hostname, 175 | Nodename: Nodename, 176 | ClusterId: ClusterId1, 177 | ClusterSetId: ClusterSet, 178 | ServiceType: model.ClusterSetIPType, 179 | ServiceExportCreationTimestamp: SvcExportCreationTimestamp, 180 | Attributes: map[string]string{model.K8sVersionAttr: PackageVersion}, 181 | } 182 
| } 183 | 184 | func GetMulticlusterTestEndpoints() []*model.Endpoint { 185 | endpoint1 := GetTestEndpoint1() 186 | endpoint2 := GetTestEndpoint2() 187 | // Set Different ClusterIds 188 | endpoint2.ClusterId = ClusterId2 189 | return []*model.Endpoint{endpoint1, endpoint2} 190 | } 191 | 192 | func GetTestEndpoints(count int) (endpts []*model.Endpoint) { 193 | // use +3 offset to avoid collision with test endpoints 1 and 2 194 | for i := 3; i < count+3; i++ { 195 | e := GetTestEndpoint1() 196 | e.ClusterId = ClusterId1 197 | e.Id = fmt.Sprintf("tcp-192_168_0_%d-1", i) 198 | e.IP = fmt.Sprintf("192.168.0.%d", i) 199 | endpts = append(endpts, e) 200 | } 201 | return endpts 202 | } 203 | 204 | func ClusterIdForTest() *aboutv1alpha1.ClusterProperty { 205 | return &aboutv1alpha1.ClusterProperty{ 206 | ObjectMeta: metav1.ObjectMeta{ 207 | Name: model.ClusterIdPropertyName, 208 | }, 209 | Spec: aboutv1alpha1.ClusterPropertySpec{ 210 | Value: ClusterId1, 211 | }, 212 | } 213 | } 214 | 215 | func ClusterSetIdForTest() *aboutv1alpha1.ClusterProperty { 216 | return &aboutv1alpha1.ClusterProperty{ 217 | ObjectMeta: metav1.ObjectMeta{ 218 | Name: model.ClusterSetIdPropertyName, 219 | }, 220 | Spec: aboutv1alpha1.ClusterPropertySpec{ 221 | Value: ClusterSet, 222 | }, 223 | } 224 | } 225 | --------------------------------------------------------------------------------
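A minimal usage sketch (an editorial addition, not a file from the repository) showing how the service discovery cache in pkg/cloudmap/cache.go behaves; the namespace name and ID here are made-up values, and the module is assumed to be resolvable on the Go module path:

package main

import (
	"fmt"

	"github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/cloudmap"
	"github.com/aws/aws-cloud-map-mcs-controller-for-k8s/pkg/model"
)

func main() {
	// Build a cache with the default TTLs: 10s for the namespace and
	// service ID maps, 5s for endpoints.
	c := cloudmap.NewDefaultServiceDiscoveryClientCache()

	// Cache a namespace map, as the service discovery client does after
	// listing namespaces from Cloud Map. "demo" and "ns-demo-id" are
	// hypothetical values.
	c.CacheNamespaceMap(map[string]*model.Namespace{
		"demo": {Id: "ns-demo-id", Name: "demo", Type: model.HttpNamespaceType},
	})

	// Lookups hit the cache until the TTL expires; afterwards found is
	// false and the caller falls back to the AWS API and re-caches.
	if nsMap, found := c.GetNamespaceMap(); found {
		fmt.Println("cached namespaces:", len(nsMap))
	}
}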