├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug-report.md
│   │   ├── cherry-pick.md
│   │   ├── enhancement.md
│   │   ├── flaking-test.md
│   │   ├── good-first.md
│   │   └── question.md
│   ├── PULL_REQUEST_TEMPLATE.md
│   └── workflows
│       └── ci.yml
├── .gitignore
├── .golangci.yml
├── CODE_OF_CONDUCT.md
├── LICENSE
├── Makefile
├── OWNERS
├── README.md
├── app
│   ├── controllers.go
│   └── manager.go
├── artifacts
│   └── deploy
│       ├── ingressclass-fake.yaml
│       └── multicluster-provider-fake.yaml
├── cloud.go
├── cluster
│   └── images
│       ├── Dockerfile
│       └── buildx.Dockerfile
├── cmd
│   └── controller-manager
│       └── controller-manager.go
├── fake
│   ├── doc.go
│   ├── fake.go
│   └── regist.go
├── go.mod
├── go.sum
├── hack
│   ├── build.sh
│   ├── deploy-provider.sh
│   ├── docker.sh
│   ├── util.sh
│   ├── verify-all.sh
│   └── verify-staticcheck.sh
├── options
│   ├── options.go
│   └── validation.go
├── pkg
│   ├── controllers
│   │   ├── context
│   │   │   └── context.go
│   │   ├── crdinstallation
│   │   │   └── crd_installation_controller.go
│   │   ├── indexes
│   │   │   └── reference_indexer.go
│   │   ├── mciservicelocations
│   │   │   └── mci_service_locations.go
│   │   ├── multiclusteringress
│   │   │   ├── eventhandlers.go
│   │   │   └── mci_controller.go
│   │   ├── multiclusterservice
│   │   │   ├── eventhandlers.go
│   │   │   └── mcs_controller.go
│   │   └── serviceexportpropagation
│   │       ├── eventhandlers.go
│   │       └── serviceexport_propagation_controller.go
│   └── util
│       ├── ingressclass.go
│       └── multiclusterservice.go
└── plugins.go
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | about: Report a bug encountered while using Karmada.
4 | labels: kind/bug
5 |
6 | ---
7 |
8 |
10 |
11 |
12 | **What happened**:
13 |
14 | **What you expected to happen**:
15 |
16 | **How to reproduce it (as minimally and precisely as possible)**:
17 |
18 | **Anything else we need to know?**:
19 |
20 | **Environment**:
21 | - Karmada version:
22 | - kubectl-karmada or karmadactl version (the result of `kubectl-karmada version` or `karmadactl version`):
23 | - Others:
24 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/cherry-pick.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: CherryPick Track
3 | about: Track tasks when release branches need cherry-pick.
4 | labels: help wanted
5 | ---
6 |
7 | **Which PR needs cherry-picks:**
8 |
11 | PR #
12 |
13 | **Which release branches need this patch:**
14 |
18 | - [ ] release-1.x
19 | - [ ] release-1.y
20 | - [ ] release-1.z
21 |
22 | **How to cherry-pick PRs:**
23 |
24 | The `hack/cherry_pick_pull.sh` script can help you initiate a cherry-pick
25 | automatically; please follow the instructions in [this guideline](https://karmada.io/docs/contributor/cherry-picks).
26 |
27 | The script will send the PR for you; please remember to `copy the release notes` from
28 | the original PR to the new PR description.
29 |
30 | **How to join or take the task**:
31 |
32 | Just reply on the issue with the message `/assign` in a **separate line**.
33 |
34 | Then, the issue will be assigned to you.
35 |
36 | **Useful References:**
37 |
38 | - Release timeline: https://karmada.io/docs/releases
39 | - How to cherry-pick PRs: https://karmada.io/docs/contributor/cherry-picks
40 |
41 | **Anything else we need to know:**
42 |
43 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/enhancement.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Enhancement Request
3 | about: Suggest an enhancement to the project
4 | labels: kind/feature
5 |
6 | ---
7 |
8 |
9 | **What would you like to be added**:
10 |
11 | **Why is this needed**:
12 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/flaking-test.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Flaking Test
3 | about: Report flaky tests or jobs in CI
4 | labels: kind/flake
5 |
6 | ---
7 |
8 | #### Which jobs are flaking:
9 |
10 | #### Which test(s) are flaking:
11 |
12 | #### Reason for failure:
13 |
14 | #### Anything else we need to know:
15 |
16 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/good-first.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Good First Issue
3 | about: Publish a good first issue
4 | labels: good first issue
5 |
6 | ---
7 |
8 |
10 |
11 | **Task description**:
12 |
13 | **Solution**:
14 |
15 | **Who can join or take the task**:
16 |
17 | The good first issue is intended for `first-time contributors` to get started on their contributor journey.
18 |
19 | After a contributor has successfully completed 1-2 good first issues,
20 | they should be ready to move on to `help wanted` items, saving the remaining `good first issue` items for other new contributors.
21 |
22 | **How to join or take the task**:
23 |
24 | Just reply on the issue with the message `/assign` in a separate line.
25 |
26 | Then, the issue will be assigned to you.
27 |
28 | **How to ask for help**:
29 |
30 | If you need help or have questions, please feel free to ask on this issue.
31 | The issue author or other members of the community will guide you through the contribution process.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
3 | about: Question relating to Karmada.
4 | labels: kind/question
5 |
6 | ---
7 |
8 |
13 | **Please provide an in-depth description of the question you have**:
14 |
15 | **What do you think about this question?**:
16 |
17 | **Environment**:
18 | - Karmada version:
19 | - Kubernetes version:
20 | - Others:
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | **What type of PR is this?**
2 |
3 |
17 |
18 | **What this PR does / why we need it**:
19 |
20 | **Which issue(s) this PR fixes**:
21 | Fixes #
22 |
23 | **Special notes for your reviewer**:
24 |
25 | **Does this PR introduce a user-facing change?**:
26 |
30 | ```release-note
31 |
32 | ```
33 |
34 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | # This workflow will build a golang project
2 | # For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go
3 |
4 | name: CI Workflow
5 | on:
6 | # Run this workflow every time a new commit is pushed to the upstream or a fork repository.
7 | # Running the workflow on a fork repository helps contributors find and resolve issues before sending a PR.
8 | push:
9 | pull_request:
10 | # https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#concurrency
11 | concurrency:
12 | group: ${{ github.workflow }}-${{ github.actor }}-${{ github.head_ref || github.run_id }}
13 | cancel-in-progress: true
14 | jobs:
15 | golangci:
16 | name: lint
17 | runs-on: ubuntu-22.04
18 | steps:
19 | - name: checkout code
20 | uses: actions/checkout@v3
21 | - name: install Go
22 | uses: actions/setup-go@v3
23 | with:
24 | go-version: 1.21.10
25 | - name: lint
26 | run: hack/verify-staticcheck.sh
27 | build:
28 | runs-on: ubuntu-22.04
29 | steps:
30 | - uses: actions/checkout@v3
31 | - name: Set up Go
32 | uses: actions/setup-go@v3
33 | with:
34 | go-version: 1.21.10
35 | - name: Build
36 | run: go build -v ./...
37 | - name: Test
38 | run: go test -v ./...
39 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Binaries for programs and plugins
2 | *.exe
3 | *.exe~
4 | *.dll
5 | *.so
6 | *.dylib
7 |
8 | # Test binary, built with `go test -c`
9 | *.test
10 |
11 | # Output of the go coverage tool, specifically when used with LiteIDE
12 | *.out
13 | _tmp/
14 | _output/
15 |
16 | # Dependency directories (remove the comment below to include it)
17 | vendor/
18 |
19 | .idea/
20 | .vscode/
21 |
22 | # Dependency directories (remove the comment below to include it)
23 | # vendor/
24 |
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | # This file contains all configuration options for running analysis.
2 | # For more details, please refer to: https://golangci-lint.run/usage/configuration/
3 |
4 | run:
5 | # timeout for analysis, e.g. 30s, 5m, default is 1m
6 | timeout: 10m
7 |
8 | # which dirs to skip: issues from them won't be reported;
9 | # can use regexp here: generated.*, regexp is applied on full path;
10 | # default value is empty list, but default dirs are skipped independently
11 | # from this option's value (see skip-dirs-use-default).
12 | # "/" will be replaced by current OS file path separator to properly work
13 | # on Windows.
14 | skip-dirs:
15 | - (^|/)vendor($|/)
16 |
17 | # default is true. Enables skipping of directories:
18 | # vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
19 | skip-dirs-use-default: false
20 |
21 | # One of 'readonly' and 'vendor'.
22 | # - readonly: the go command is disallowed from the implicit automatic updating of go.mod described above.
23 | # Instead, it fails when any changes to go.mod are needed. This setting is most useful to check
24 | # that go.mod does not need updates, such as in a continuous integration and testing system.
25 | # - vendor: the go command assumes that the vendor directory holds the correct copies of dependencies and ignores
26 | # the dependency descriptions in go.mod.
27 | modules-download-mode: readonly
28 | linters:
29 | enable:
30 | # linters maintained by golang.org
31 | - gofmt
32 | - goimports
33 | - govet
34 | # linters default enabled by golangci-lint .
35 | - errcheck
36 | - gosimple
37 | - ineffassign
38 | - staticcheck
39 | - typecheck
40 | - unused
41 | # other linters supported by golangci-lint.
42 | - gci
43 | - gocyclo
44 | - gosec
45 | - misspell
46 | - whitespace
47 | - revive
48 |
49 | linters-settings:
50 | goimports:
51 | local-prefixes: github.com/karmada-io/multicluster-cloud-provider
52 | gocyclo:
53 | # minimal cyclomatic complexity to report
54 | min-complexity: 15
55 | gci:
56 | sections:
57 | - Standard
58 | - Default
59 | - Prefix(github.com/karmada-io/multicluster-cloud-provider)
60 |
61 | issues:
62 | # The list of ids of default excludes to include or disable. By default it's empty.
63 | include:
64 | # disable excluding of issues about comments from revive
65 | # see https://golangci-lint.run/usage/configuration/#command-line-options for more info
66 | - EXC0012
67 | - EXC0013
68 | - EXC0014
69 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Karmada Community Code of Conduct
2 |
3 | Please refer to our [Karmada Community Code of Conduct](https://github.com/karmada-io/community/blob/main/CODE_OF_CONDUCT.md).
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | GOOS ?= $(shell go env GOOS)
2 | GOARCH ?= $(shell go env GOARCH)
3 |
4 | # Images management
5 | REGISTRY?="docker.io/karmada"
6 |
7 | TARGETS := multicluster-provider-fake
8 |
9 | # Build code.
10 | #
11 | # Args:
12 | # GOOS: OS to build.
13 | # GOARCH: Arch to build.
14 | #
15 | # Example:
16 | # make
17 | # make all
18 | # make multicluster-provider-fake
19 | # make multicluster-provider-fake GOOS=linux
20 | CMD_TARGET=$(TARGETS)
21 |
22 | .PHONY: all
23 | all: $(CMD_TARGET)
24 |
25 | .PHONY: $(CMD_TARGET)
26 | $(CMD_TARGET):
27 | BUILD_PLATFORMS=$(GOOS)/$(GOARCH) hack/build.sh $@
28 |
29 | # Build image.
30 | #
31 | # Args:
32 | # GOARCH: Arch to build.
33 | # OUTPUT_TYPE: Destination to save image(docker/registry).
34 | #
35 | # Example:
36 | # make images
37 | # make image-multicluster-provider-fake
38 | # make image-multicluster-provider-fake GOARCH=arm64
39 | IMAGE_TARGET=$(addprefix image-, $(TARGETS))
40 | .PHONY: $(IMAGE_TARGET)
41 | $(IMAGE_TARGET):
42 | set -e;\
43 | target=$$(echo $(subst image-,,$@));\
44 | make $$target GOOS=linux;\
45 | VERSION=$(VERSION) REGISTRY=$(REGISTRY) BUILD_PLATFORMS=linux/$(GOARCH) hack/docker.sh $$target
46 |
47 | images: $(IMAGE_TARGET)
48 |
49 | .PHONY: clean
50 | clean:
51 | rm -rf _tmp _output
52 |
53 | .PHONY: verify
54 | verify:
55 | hack/verify-all.sh
56 |
--------------------------------------------------------------------------------
/OWNERS:
--------------------------------------------------------------------------------
1 | reviewers:
2 | - chaunceyjiang
3 | - RainbowMango
4 | - XiShanYongYe-Chang
5 | approvers:
6 | - chaunceyjiang
7 | - RainbowMango
8 | - XiShanYongYe-Chang
9 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # multicluster-cloud-provider
2 |
3 | This repository defines the shared interfaces which Karmada cloud providers implement. These interfaces allow various
4 | controllers to integrate with any cloud provider in a pluggable fashion.
5 |
6 | ## Background
7 |
8 | To enable Karmada to run on public cloud platforms and to flexibly use and manage other basic resources and services on
9 | the cloud, cloud providers need to implement their own adapters. However, much of that implementation work is
10 | the same for every cloud provider.
11 |
12 | Karmada can learn from the experience of the Kubernetes Cloud Controller Manager (CCM): https://github.com/kubernetes/cloud-provider.
13 | It can likewise provide a public repository that exposes interfaces for using and managing basic resources and services
14 | on the public cloud. Cloud providers only need to implement these interfaces to offer users their own adapters.
15 |
16 | ## Purpose
17 |
18 | This library is a shared dependency for processes that need to integrate with cloud-provider-specific functionality.
19 |
20 | ## Fake testing
21 |
22 | ### Command:
23 |
24 | Make multicluster-provider-fake binary:
25 | ```shell
26 | make multicluster-provider-fake
27 | ```
28 |
29 | Make multicluster-provider-fake image:
30 | ```shell
31 | make image-multicluster-provider-fake
32 | ```
33 |
34 | Deploy the multicluster-provider-fake deployment:
35 | ```shell
36 | hack/deploy-provider.sh
37 | ```
38 |
39 | Delete the multicluster-provider-fake deployment:
40 | ```shell
41 | kubectl --context karmada-host -n karmada-system delete deployments.apps multicluster-provider-fake
42 | ```
43 |
44 | ### Verify
45 |
46 |
47 | mci.yaml
48 |
49 | ```yaml
50 | apiVersion: networking.karmada.io/v1alpha1
51 | kind: MultiClusterIngress
52 | metadata:
53 | name: minimal-ingress
54 | annotations:
55 | nginx.ingress.kubernetes.io/rewrite-target: /
56 | spec:
57 | rules:
58 | - http:
59 | paths:
60 | - path: /testpath
61 | pathType: Prefix
62 | backend:
63 | service:
64 | name: serve
65 | port:
66 | number: 80
67 | ```
68 |
69 |
70 |
71 | application.yaml
72 |
73 | ```yaml
74 | apiVersion: apps/v1
75 | kind: Deployment
76 | metadata:
77 | name: serve
78 | spec:
79 | replicas: 1
80 | selector:
81 | matchLabels:
82 | app: serve
83 | template:
84 | metadata:
85 | labels:
86 | app: serve
87 | spec:
88 | containers:
89 | - name: serve
90 | image: jeremyot/serve:0a40de8
91 | args:
92 | - "--message='hello from cluster member1 (Node: {{env \"NODE_NAME\"}} Pod: {{env \"POD_NAME\"}} Address: {{addr}})'"
93 | env:
94 | - name: NODE_NAME
95 | valueFrom:
96 | fieldRef:
97 | fieldPath: spec.nodeName
98 | - name: POD_NAME
99 | valueFrom:
100 | fieldRef:
101 | fieldPath: metadata.name
102 | ---
103 | apiVersion: v1
104 | kind: Service
105 | metadata:
106 | name: serve
107 | spec:
108 | ports:
109 | - port: 80
110 | targetPort: 8080
111 | selector:
112 | app: serve
113 | ---
114 | apiVersion: policy.karmada.io/v1alpha1
115 | kind: PropagationPolicy
116 | metadata:
117 | name: mcs-workload
118 | spec:
119 | resourceSelectors:
120 | - apiVersion: apps/v1
121 | kind: Deployment
122 | name: serve
123 | - apiVersion: v1
124 | kind: Service
125 | name: serve
126 | placement:
127 | clusterAffinity:
128 | clusterNames:
129 | - member1
130 | - member2
131 | replicaScheduling:
132 | replicaDivisionPreference: Weighted
133 | replicaSchedulingType: Divided
134 | weightPreference:
135 | staticWeightList:
136 | - targetCluster:
137 | clusterNames:
138 | - member1
139 | weight: 1
140 | - targetCluster:
141 | clusterNames:
142 | - member2
143 | weight: 1
144 | ```
145 |
146 |
147 | ```shell
148 | kubectl apply -f mci.yaml
149 | kubectl apply -f application.yaml
150 | ```
--------------------------------------------------------------------------------
/app/controllers.go:
--------------------------------------------------------------------------------
1 | package app
2 |
3 | import (
4 | "fmt"
5 |
6 | controllerscontext "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/context"
7 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/crdinstallation"
8 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/mciservicelocations"
9 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/multiclusteringress"
10 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/multiclusterservice"
11 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/serviceexportpropagation"
12 | )
13 |
14 | func startMCIController(ctx controllerscontext.Context) (enabled bool, err error) {
15 | loadBalancer, support := ctx.CloudProvider.LoadBalancer()
16 | if !support {
17 | return false, fmt.Errorf("the multicluster controller manager does not support external loadBalancer")
18 | }
19 | if loadBalancer == nil {
20 | return false, fmt.Errorf("couldn't get the target external loadBalancer provider")
21 | }
22 |
23 | mciController := &multiclusteringress.MCIController{
24 | Client: ctx.Mgr.GetClient(),
25 | LoadBalancer: loadBalancer,
26 | InformerManager: ctx.InformerManager,
27 | EventRecorder: ctx.Mgr.GetEventRecorderFor(multiclusteringress.ControllerName),
28 | RateLimiterOptions: ctx.Opts.RateLimiterOptions,
29 | ProviderClassName: ctx.ProviderClassName,
30 | }
31 | if err = mciController.SetupWithManager(ctx.Context, ctx.Mgr); err != nil {
32 | return false, err
33 | }
34 | return true, nil
35 | }
36 |
37 | func startMCSController(ctx controllerscontext.Context) (enabled bool, err error) {
38 | loadBalancer, support := ctx.CloudProvider.MCSLoadBalancer()
39 | if !support {
40 | return false, fmt.Errorf("the multicluster controller manager does not support external loadBalancer")
41 | }
42 | if loadBalancer == nil {
43 | return false, fmt.Errorf("couldn't get the target external loadBalancer provider")
44 | }
45 |
46 | mcsController := &multiclusterservice.MCSController{
47 | Client: ctx.Mgr.GetClient(),
48 | MCSLoadBalancer: loadBalancer,
49 | InformerManager: ctx.InformerManager,
50 | EventRecorder: ctx.Mgr.GetEventRecorderFor(multiclusterservice.ControllerName),
51 | RateLimiterOptions: ctx.Opts.RateLimiterOptions,
52 | }
53 | if err = mcsController.SetupWithManager(ctx.Context, ctx.Mgr); err != nil {
54 | return false, err
55 | }
56 | return true, nil
57 | }
58 |
59 | func startCRDInstallationController(ctx controllerscontext.Context) (enabled bool, err error) {
60 | c := &crdinstallation.Controller{
61 | Client: ctx.Mgr.GetClient(),
62 | EventRecorder: ctx.Mgr.GetEventRecorderFor(crdinstallation.ControllerName),
63 | RateLimiterOptions: ctx.Opts.RateLimiterOptions,
64 | }
65 | if err = c.SetupWithManager(ctx.Context, ctx.Mgr); err != nil {
66 | return false, err
67 | }
68 | return true, nil
69 | }
70 |
71 | func startServiceExportPropagationController(ctx controllerscontext.Context) (enabled bool, err error) {
72 | c := &serviceexportpropagation.Controller{
73 | Client: ctx.Mgr.GetClient(),
74 | EventRecorder: ctx.Mgr.GetEventRecorderFor(serviceexportpropagation.ControllerName),
75 | RateLimiterOptions: ctx.Opts.RateLimiterOptions,
76 | ProviderClassName: ctx.ProviderClassName,
77 | }
78 | if err = c.SetupWithManager(ctx.Context, ctx.Mgr); err != nil {
79 | return false, err
80 | }
81 | return true, nil
82 | }
83 |
84 | func startMCIServiceLocationsController(ctx controllerscontext.Context) (enabled bool, err error) {
85 | c := &mciservicelocations.Controller{
86 | Client: ctx.Mgr.GetClient(),
87 | RateLimiterOptions: ctx.Opts.RateLimiterOptions,
88 | }
89 | if err = c.SetupWithManager(ctx.Mgr); err != nil {
90 | return false, err
91 | }
92 | return true, nil
93 | }
94 |
--------------------------------------------------------------------------------
/app/manager.go:
--------------------------------------------------------------------------------
1 | package app
2 |
3 | import (
4 | "context"
5 | "flag"
6 | "net"
7 | "strconv"
8 |
9 | "github.com/karmada-io/karmada/pkg/sharedcli"
10 | "github.com/karmada-io/karmada/pkg/sharedcli/klogflag"
11 | "github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
12 | "github.com/karmada-io/karmada/pkg/util/fedinformer"
13 | "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
14 | "github.com/karmada-io/karmada/pkg/util/gclient"
15 | "github.com/karmada-io/karmada/pkg/util/restmapper"
16 | "github.com/karmada-io/karmada/pkg/version"
17 | "github.com/karmada-io/karmada/pkg/version/sharedcommand"
18 | "github.com/spf13/cobra"
19 | "k8s.io/client-go/dynamic"
20 | "k8s.io/client-go/rest"
21 | cliflag "k8s.io/component-base/cli/flag"
22 | "k8s.io/component-base/term"
23 | "k8s.io/klog/v2"
24 | controllerruntime "sigs.k8s.io/controller-runtime"
25 | "sigs.k8s.io/controller-runtime/pkg/cache"
26 | "sigs.k8s.io/controller-runtime/pkg/client"
27 | "sigs.k8s.io/controller-runtime/pkg/healthz"
28 | metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
29 |
30 | multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
31 | "github.com/karmada-io/multicluster-cloud-provider/options"
32 | controllersctx "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/context"
33 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/indexes"
34 | )
35 |
36 | var controllers = make(controllersctx.Initializers)
37 |
38 | func init() {
39 | controllers["multiclusteringress"] = startMCIController
40 | controllers["multiclusterservice"] = startMCSController
41 | controllers["crd-installation"] = startCRDInstallationController
42 | controllers["serviceexport-propagation"] = startServiceExportPropagationController
43 | controllers["mci-service-locations"] = startMCIServiceLocationsController
44 | }
45 |
46 | // InitProviderFunc is used to initialize multicluster provider
47 | type InitProviderFunc func(name, cloudConfigFile string) multiclusterprovider.Interface
48 |
49 | // NewControllerManagerCommand creates a *cobra.Command object with default parameters
50 | func NewControllerManagerCommand(ctx context.Context,
51 | opts *options.MultiClusterControllerManagerOptions,
52 | additionalFlags cliflag.NamedFlagSets,
53 | providerInitializer InitProviderFunc,
54 | ) *cobra.Command {
55 | cmd := &cobra.Command{
56 | Use: "multicluster-controller-manager",
57 | Long: `The MultiCluster controller manager is a daemon that embeds
58 | the cloud-specific control loops shipped with Karmada.`,
59 | RunE: func(cmd *cobra.Command, args []string) error {
60 | // validate options
61 | if errs := opts.Validate(); len(errs) != 0 {
62 | return errs.ToAggregate()
63 | }
64 |
65 | provider := providerInitializer(opts.Name, opts.CloudConfigFile)
66 | return Run(ctx, opts, provider)
67 | },
68 | }
69 |
70 | fss := cliflag.NamedFlagSets{}
71 |
72 | genericFlagSet := fss.FlagSet("generic")
73 | // Add the flag(--kubeconfig) that is added by controller-runtime
74 | // (https://github.com/kubernetes-sigs/controller-runtime/blob/v0.11.1/pkg/client/config/config.go#L39),
75 | // and update the flag usage.
76 | genericFlagSet.AddGoFlagSet(flag.CommandLine)
77 | genericFlagSet.Lookup("kubeconfig").Usage = "Path to karmada control plane kubeconfig file."
78 | opts.AddFlags(genericFlagSet)
79 |
80 | // Set klog flags
81 | logsFlagSet := fss.FlagSet("logs")
82 | klogflag.Add(logsFlagSet)
83 |
84 | cmd.AddCommand(sharedcommand.NewCmdVersion("multicluster-controller-manager"))
85 | cmd.Flags().AddFlagSet(genericFlagSet)
86 | cmd.Flags().AddFlagSet(logsFlagSet)
87 | for _, f := range additionalFlags.FlagSets {
88 | cmd.Flags().AddFlagSet(f)
89 | }
90 |
91 | cols, _, _ := term.TerminalSize(cmd.OutOrStdout())
92 | sharedcli.SetUsageAndHelpFunc(cmd, fss, cols)
93 | return cmd
94 | }
95 |
96 | // Run runs the controller-manager with options. This should never exit.
97 | func Run(ctx context.Context, opts *options.MultiClusterControllerManagerOptions, cloudProvider multiclusterprovider.Interface) error {
98 | klog.Infof("multicluster-controller-manager version: %s", version.Get())
99 |
100 | profileflag.ListenAndServe(opts.ProfileOpts)
101 |
102 | config, err := controllerruntime.GetConfig()
103 | if err != nil {
104 | panic(err)
105 | }
106 | config.QPS, config.Burst = opts.KubeAPIQPS, opts.KubeAPIBurst
107 | controllerManager, err := controllerruntime.NewManager(config, controllerruntime.Options{
108 | Logger: klog.Background(),
109 | Scheme: gclient.NewSchema(),
110 | Cache: cache.Options{SyncPeriod: &opts.ResyncPeriod.Duration},
111 | LeaderElection: opts.LeaderElection.LeaderElect,
112 | LeaderElectionID: opts.LeaderElection.ResourceName,
113 | LeaderElectionNamespace: opts.LeaderElection.ResourceNamespace,
114 | LeaseDuration: &opts.LeaderElection.LeaseDuration.Duration,
115 | RenewDeadline: &opts.LeaderElection.RenewDeadline.Duration,
116 | RetryPeriod: &opts.LeaderElection.RetryPeriod.Duration,
117 | LeaderElectionResourceLock: opts.LeaderElection.ResourceLock,
118 | HealthProbeBindAddress: net.JoinHostPort(opts.BindAddress, strconv.Itoa(opts.SecurePort)),
119 | LivenessEndpointName: "/healthz",
120 | Metrics: metricsserver.Options{BindAddress: opts.MetricsBindAddress},
121 | MapperProvider: restmapper.MapperProvider,
122 | BaseContext: func() context.Context {
123 | return ctx
124 | },
125 | NewCache: func(config *rest.Config, opts cache.Options) (cache.Cache, error) {
126 | opts.DefaultTransform = fedinformer.StripUnusedFields
127 | return cache.New(config, opts)
128 | },
129 | })
130 | if err != nil {
131 | klog.Errorf("Failed to build controller manager: %v", err)
132 | return err
133 | }
134 |
135 | if err := controllerManager.AddHealthzCheck("ping", healthz.Ping); err != nil {
136 | klog.Errorf("Failed to add health check endpoint: %v", err)
137 | return err
138 | }
139 |
140 | setupControllers(ctx, controllerManager, cloudProvider, opts)
141 |
142 | // blocks until the context is done.
143 | if err := controllerManager.Start(ctx); err != nil {
144 | klog.Errorf("controller manager exits unexpectedly: %v", err)
145 | return err
146 | }
147 |
148 | // never reach here
149 | return nil
150 | }
151 |
152 | // setupControllers initializes controllers and sets them up one by one.
153 | func setupControllers(ctx context.Context, mgr controllerruntime.Manager, cloudProvider multiclusterprovider.Interface, opts *options.MultiClusterControllerManagerOptions) {
154 | restConfig := mgr.GetConfig()
155 | dynamicClientSet := dynamic.NewForConfigOrDie(restConfig)
156 |
157 | controlPlaneInformerManager := genericmanager.NewSingleClusterInformerManager(dynamicClientSet, 0, ctx.Done())
158 |
159 | setupIndexesForMCI(ctx, mgr.GetFieldIndexer())
160 |
161 | controllerCtx := controllersctx.Context{
162 | Context: ctx,
163 | Mgr: mgr,
164 | CloudProvider: cloudProvider,
165 | Opts: controllersctx.Options{
166 | Controllers: controllers.ControllerNames(),
167 | RateLimiterOptions: opts.RateLimiterOpts,
168 | },
169 | DynamicClientSet: dynamicClientSet,
170 | InformerManager: controlPlaneInformerManager,
171 | ProviderClassName: opts.ProviderClassName,
172 | }
173 | if err := controllers.StartControllers(controllerCtx, nil); err != nil {
174 | klog.Fatalf("error starting controllers: %v", err)
175 | }
176 |
177 | // Ensure the InformerManager stops when the stop channel closes
178 | go func() {
179 | <-ctx.Done()
180 | genericmanager.StopInstance()
181 | }()
182 | }
183 |
184 | func setupIndexesForMCI(ctx context.Context, fieldIndexer client.FieldIndexer) {
185 | if err := indexes.SetupServiceIndexesForMCI(ctx, fieldIndexer); err != nil {
186 | klog.Fatalf("failed to setup service indexes for MultiClusterIngress object: %v", err)
187 | }
188 |
189 | if err := indexes.SetupSecretIndexesForMCI(ctx, fieldIndexer); err != nil {
190 | klog.Fatalf("failed to setup secret indexes for MultiClusterIngress object: %v", err)
191 | }
192 | }
193 |
--------------------------------------------------------------------------------
/artifacts/deploy/ingressclass-fake.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: networking.k8s.io/v1
2 | kind: IngressClass
3 | metadata:
4 | annotations:
5 | ingressclass.kubernetes.io/is-default-class: "true"
6 | name: fake
7 | spec:
8 | controller: karmada.io/fake
9 |
--------------------------------------------------------------------------------
/artifacts/deploy/multicluster-provider-fake.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: multicluster-provider-fake
5 | namespace: karmada-system
6 | labels:
7 | app: multicluster-provider-fake
8 | spec:
9 | replicas: 2
10 | selector:
11 | matchLabels:
12 | app: multicluster-provider-fake
13 | template:
14 | metadata:
15 | labels:
16 | app: multicluster-provider-fake
17 | spec:
18 | automountServiceAccountToken: false
19 | tolerations:
20 | - key: node-role.kubernetes.io/master
21 | operator: Exists
22 | containers:
23 | - name: multicluster-provider-fake
24 | image: docker.io/karmada/multicluster-provider-fake:latest
25 | imagePullPolicy: IfNotPresent
26 | command:
27 | - /bin/multicluster-provider-fake
28 | - --kubeconfig=/etc/kubeconfig/karmada.config
29 | - --bind-address=0.0.0.0
30 | - --secure-port=10368
31 | - --multicluster-provider=fake
32 | - --provider-ingress-class=karmada.io/fake
33 | - --v=4
34 | livenessProbe:
35 | httpGet:
36 | path: /healthz
37 | port: 10368
38 | scheme: HTTP
39 | failureThreshold: 3
40 | initialDelaySeconds: 15
41 | periodSeconds: 15
42 | timeoutSeconds: 5
43 | volumeMounts:
44 | - name: karmada-config
45 | mountPath: /etc/kubeconfig
46 | readOnly: true
47 | volumes:
48 | - name: karmada-config
49 | secret:
50 | secretName: multicluster-provider-config
51 |
--------------------------------------------------------------------------------
/cloud.go:
--------------------------------------------------------------------------------
1 | package multiclusterprovider
2 |
3 | import (
4 | "context"
5 |
6 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
7 | corev1 "k8s.io/api/core/v1"
8 | networkingv1 "k8s.io/api/networking/v1"
9 | )
10 |
11 | // Interface is an abstract, pluggable interface for multicluster providers.
12 | type Interface interface {
13 | // LoadBalancer returns a MultiClusterIngress balancer interface, also returns true
14 | // if the interface is supported, false otherwise.
15 | LoadBalancer() (LoadBalancer, bool)
16 | // MCSLoadBalancer returns a MultiClusterService balancer interface, also returns true
17 | // if the interface is supported, false otherwise.
18 | MCSLoadBalancer() (MCSLoadBalancer, bool)
19 | // ProviderName returns the cloud provider ID.
20 | ProviderName() string
21 | }
22 |
23 | // LoadBalancer is an abstract, pluggable interface for MultiClusterIngress load balancers.
24 | type LoadBalancer interface {
25 | // GetLoadBalancer returns whether the specified load balancer exists, and if so, what its status is.
26 | GetLoadBalancer(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (status *networkingv1.IngressLoadBalancerStatus, exist bool, err error)
27 | // EnsureLoadBalancer creates a new load balancer.
28 | EnsureLoadBalancer(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (status *networkingv1.IngressLoadBalancerStatus, err error)
29 | // UpdateLoadBalancer updates the specified load balancer.
30 | UpdateLoadBalancer(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (status *networkingv1.IngressLoadBalancerStatus, err error)
31 | // EnsureLoadBalancerDeleted deletes the specified load balancer if it exists.
32 | EnsureLoadBalancerDeleted(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) error
33 | }
34 |
35 | // MCSLoadBalancer is an abstract, pluggable interface for MultiClusterService load balancers.
36 | type MCSLoadBalancer interface {
37 | // GetMCSLoadBalancer returns whether the specified load balancer exists, and if so, what its status is.
38 | GetMCSLoadBalancer(ctx context.Context, mcs *networkingv1alpha1.MultiClusterService) (status *corev1.LoadBalancerStatus, exist bool, err error)
39 | // EnsureMCSLoadBalancer creates a new load balancer.
40 | EnsureMCSLoadBalancer(ctx context.Context, mcs *networkingv1alpha1.MultiClusterService) (status *corev1.LoadBalancerStatus, err error)
41 | // UpdateMCSLoadBalancer updates the specified load balancer.
42 | UpdateMCSLoadBalancer(ctx context.Context, mcs *networkingv1alpha1.MultiClusterService) (status *corev1.LoadBalancerStatus, err error)
43 | // EnsureMCSLoadBalancerDeleted deletes the specified load balancer if it exists.
44 | EnsureMCSLoadBalancerDeleted(ctx context.Context, mcs *networkingv1alpha1.MultiClusterService) error
45 | }
46 |
--------------------------------------------------------------------------------
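The three interfaces in `cloud.go` above form the entire integration surface for an out-of-tree provider. As a rough sketch only (the `myprovider` package name, the `example` provider name, and the empty method bodies are illustrative assumptions, not part of this repository), an implementation that mirrors the pattern of the `fake` package could look like this:

```go
// Package myprovider is a hypothetical provider used only to illustrate how
// the multiclusterprovider interfaces can be implemented and registered.
package myprovider

import (
	"context"
	"io"

	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	networkingv1 "k8s.io/api/networking/v1"

	multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
)

// Provider is a skeleton implementation of multiclusterprovider.Interface.
type Provider struct{}

// ProviderName returns the provider ID used to select this provider at runtime.
func (p *Provider) ProviderName() string { return "example" }

// LoadBalancer exposes the MultiClusterIngress load-balancer implementation.
func (p *Provider) LoadBalancer() (multiclusterprovider.LoadBalancer, bool) { return p, true }

// MCSLoadBalancer exposes the MultiClusterService load-balancer implementation.
func (p *Provider) MCSLoadBalancer() (multiclusterprovider.MCSLoadBalancer, bool) { return p, true }

func (p *Provider) GetLoadBalancer(_ context.Context, _ *networkingv1alpha1.MultiClusterIngress) (*networkingv1.IngressLoadBalancerStatus, bool, error) {
	return nil, false, nil // query the cloud API for an existing load balancer here
}

func (p *Provider) EnsureLoadBalancer(_ context.Context, _ *networkingv1alpha1.MultiClusterIngress) (*networkingv1.IngressLoadBalancerStatus, error) {
	return &networkingv1.IngressLoadBalancerStatus{}, nil // create the load balancer and return its status
}

func (p *Provider) UpdateLoadBalancer(_ context.Context, _ *networkingv1alpha1.MultiClusterIngress) (*networkingv1.IngressLoadBalancerStatus, error) {
	return &networkingv1.IngressLoadBalancerStatus{}, nil
}

func (p *Provider) EnsureLoadBalancerDeleted(_ context.Context, _ *networkingv1alpha1.MultiClusterIngress) error {
	return nil
}

func (p *Provider) GetMCSLoadBalancer(_ context.Context, _ *networkingv1alpha1.MultiClusterService) (*corev1.LoadBalancerStatus, bool, error) {
	return nil, false, nil
}

func (p *Provider) EnsureMCSLoadBalancer(_ context.Context, _ *networkingv1alpha1.MultiClusterService) (*corev1.LoadBalancerStatus, error) {
	return &corev1.LoadBalancerStatus{}, nil
}

func (p *Provider) UpdateMCSLoadBalancer(_ context.Context, _ *networkingv1alpha1.MultiClusterService) (*corev1.LoadBalancerStatus, error) {
	return &corev1.LoadBalancerStatus{}, nil
}

func (p *Provider) EnsureMCSLoadBalancerDeleted(_ context.Context, _ *networkingv1alpha1.MultiClusterService) error {
	return nil
}

func init() {
	// Register the provider so the controller manager can select it by name,
	// mirroring fake/regist.go.
	multiclusterprovider.RegisterMultiClusterProvider("example", func(_ io.Reader) (multiclusterprovider.Interface, error) {
		return &Provider{}, nil
	})
}
```

A provider binary would then blank-import such a package from its `main`, the same way `cmd/controller-manager/controller-manager.go` imports the `fake` package, and select it at runtime with `--multicluster-provider=example`.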
/cluster/images/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17.1
2 |
3 | ARG BINARY
4 |
5 | RUN apk add --no-cache ca-certificates
6 |
7 | COPY ${BINARY} /bin/${BINARY}
8 |
--------------------------------------------------------------------------------
/cluster/images/buildx.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:3.17.1
2 |
3 | ARG BINARY
4 | ARG TARGETPLATFORM
5 |
6 | RUN apk add --no-cache ca-certificates
7 |
8 | COPY ${TARGETPLATFORM}/${BINARY} /bin/${BINARY}
9 |
--------------------------------------------------------------------------------
/cmd/controller-manager/controller-manager.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 |
6 | "k8s.io/component-base/cli"
7 | cliflag "k8s.io/component-base/cli/flag"
8 | _ "k8s.io/component-base/logs/json/register" // for JSON log format registration
9 | "k8s.io/klog/v2"
10 | controllerruntime "sigs.k8s.io/controller-runtime"
11 | _ "sigs.k8s.io/controller-runtime/pkg/metrics"
12 |
13 | multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
14 | "github.com/karmada-io/multicluster-cloud-provider/app"
15 | _ "github.com/karmada-io/multicluster-cloud-provider/fake" // import fake test
16 | "github.com/karmada-io/multicluster-cloud-provider/options"
17 | )
18 |
19 | func main() {
20 | ctx := controllerruntime.SetupSignalHandler()
21 | opts := options.NewClusterControllerManagerOptions()
22 | fss := cliflag.NamedFlagSets{}
23 | cmd := app.NewControllerManagerCommand(ctx, opts, fss, cloudInitializer)
24 |
25 | code := cli.Run(cmd)
26 | os.Exit(code)
27 | }
28 |
29 | func cloudInitializer(name, cloudConfigFile string) multiclusterprovider.Interface {
30 | provider, err := multiclusterprovider.InitMultiClusterProvider(name, cloudConfigFile)
31 | if err != nil {
32 | klog.Fatalf("Multicluster provider could not be initialized: %v", err)
33 | }
34 | if provider == nil {
35 | klog.Fatalf("Multicluster provider is nil")
36 | }
37 | return provider
38 | }
39 |
--------------------------------------------------------------------------------
/fake/doc.go:
--------------------------------------------------------------------------------
1 | // Package fake is a test-double implementation of
2 | // multiclusterprovider.Interface. It is useful for testing.
3 | package fake
4 |
--------------------------------------------------------------------------------
/fake/fake.go:
--------------------------------------------------------------------------------
1 | package fake
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net"
7 | "sync"
8 | "time"
9 |
10 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
11 | corev1 "k8s.io/api/core/v1"
12 | networkingv1 "k8s.io/api/networking/v1"
13 | "k8s.io/apimachinery/pkg/types"
14 | "k8s.io/klog/v2"
15 |
16 | multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
17 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/indexes"
18 | )
19 |
20 | const defaultProviderName = "fake"
21 |
22 | // Balancer is a fake balancer struct.
23 | type Balancer struct {
24 | Name string
25 | Namespace string
26 | MCI *networkingv1alpha1.MultiClusterIngress
27 | Services []string
28 | Eps []net.IP
29 | ExternalIP net.IP
30 | }
31 |
32 | // MCSBalancer is a fake balancer struct.
33 | type MCSBalancer struct {
34 | Name string
35 | Namespace string
36 | MCS *networkingv1alpha1.MultiClusterService
37 | Eps []net.IP
38 | ExternalIP net.IP
39 | }
40 |
41 | var _ multiclusterprovider.Interface = (*Provider)(nil)
42 | var _ multiclusterprovider.LoadBalancer = (*Provider)(nil)
43 | var _ multiclusterprovider.MCSLoadBalancer = (*Provider)(nil)
44 |
45 | // Provider is a fake provider struct.
46 | type Provider struct {
47 | Name string
48 | Balancers map[string]Balancer
49 | MCSBalancers map[string]MCSBalancer
50 |
51 | Calls []string
52 | RequestDelay time.Duration
53 |
54 | addCallLock sync.Mutex
55 | }
56 |
57 | // ProviderName returns the cloud provider name.
58 | func (p *Provider) ProviderName() string {
59 | if p.Name == "" {
60 | return defaultProviderName
61 | }
62 | return p.Name
63 | }
64 |
65 | // LoadBalancer returns a fake implementation of LoadBalancer. Actually it just returns itself.
66 | func (p *Provider) LoadBalancer() (multiclusterprovider.LoadBalancer, bool) {
67 | return p, true
68 | }
69 |
70 | // GetLoadBalancer is a stub implementation of LoadBalancer.GetLoadBalancer.
71 | func (p *Provider) GetLoadBalancer(_ context.Context, mci *networkingv1alpha1.MultiClusterIngress) (*networkingv1.IngressLoadBalancerStatus, bool, error) {
72 | p.addCall("get")
73 | namespacedName := types.NamespacedName{Namespace: mci.Namespace, Name: mci.Name}.String()
74 | for name := range p.Balancers {
75 | if name == namespacedName {
76 | klog.V(4).Infof("can get Balancer with mci %s", namespacedName)
77 | return &networkingv1.IngressLoadBalancerStatus{}, true, nil
78 | }
79 | }
80 |
81 | klog.V(4).Infof("can not get Balancer with mci %s", namespacedName)
82 | return nil, false, nil
83 | }
84 |
85 | // EnsureLoadBalancer is a stub implementation of LoadBalancer.EnsureLoadBalancer.
86 | func (p *Provider) EnsureLoadBalancer(_ context.Context, mci *networkingv1alpha1.MultiClusterIngress) (status *networkingv1.IngressLoadBalancerStatus, err error) {
87 | p.addCall("create")
88 | if p.Balancers == nil {
89 | p.Balancers = make(map[string]Balancer)
90 | }
91 |
92 | namespacedName := types.NamespacedName{Namespace: mci.Namespace, Name: mci.Name}.String()
93 | _, exist := p.Balancers[namespacedName]
94 | if exist {
95 | klog.V(4).Infof("there already exist a LoadBalance with mci %s", namespacedName)
96 | return nil, fmt.Errorf("there already exist a LoadBalancer")
97 | }
98 |
99 | mciCopy := mci.DeepCopy()
100 | svcRefs := indexes.BuildServiceRefIndexes(mciCopy)
101 | p.Balancers[namespacedName] = Balancer{
102 | Name: mci.Name,
103 | Namespace: mci.Namespace,
104 | MCI: mciCopy,
105 | Services: svcRefs,
106 | }
107 | klog.V(4).Infof("success to create a LoadBalance with mci %s", namespacedName)
108 | return &networkingv1.IngressLoadBalancerStatus{
109 | Ingress: []networkingv1.IngressLoadBalancerIngress{
110 | {IP: "1.2.3.4"}, // construct a random IP address and return it.
111 | },
112 | }, nil
113 | }
114 |
115 | // UpdateLoadBalancer is a stub implementation of LoadBalancer.UpdateLoadBalancer.
116 | func (p *Provider) UpdateLoadBalancer(_ context.Context, mci *networkingv1alpha1.MultiClusterIngress) (status *networkingv1.IngressLoadBalancerStatus, err error) {
117 | p.addCall("update")
118 | namespacedName := types.NamespacedName{Namespace: mci.Namespace, Name: mci.Name}.String()
119 | lb, exist := p.Balancers[namespacedName]
120 | if !exist {
121 | klog.V(4).Infof("there not exist a LoadBalance with mci %s", namespacedName)
122 | return nil, fmt.Errorf("there not exist a LoadBalancer")
123 | }
124 |
125 | mciCopy := mci.DeepCopy()
126 | svcRefs := indexes.BuildServiceRefIndexes(mciCopy)
127 | lb.MCI, lb.Services = mciCopy, svcRefs
128 | p.Balancers[namespacedName] = lb // write the updated copy back; lb is a copy of the map value
129 | klog.V(4).Infof("success to update the LoadBalance with mci %s", namespacedName)
130 | return &networkingv1.IngressLoadBalancerStatus{
131 | Ingress: []networkingv1.IngressLoadBalancerIngress{
132 | {IP: "1.2.3.5"}, // construct a random IP address and return it.
133 | },
134 | }, nil
135 | }
136 |
137 | // EnsureLoadBalancerDeleted is a stub implementation of LoadBalancer.EnsureLoadBalancerDeleted.
138 | func (p *Provider) EnsureLoadBalancerDeleted(_ context.Context, mci *networkingv1alpha1.MultiClusterIngress) error {
139 | p.addCall("delete")
140 | namespacedName := types.NamespacedName{Namespace: mci.Namespace, Name: mci.Name}.String()
141 | delete(p.Balancers, namespacedName)
142 | klog.V(4).Infof("success to delete the LoadBalance with mci %s", namespacedName)
143 | return nil
144 | }
145 |
146 | // MCSLoadBalancer returns a fake implementation of MCSLoadBalancer. Actually it just returns itself.
147 | func (p *Provider) MCSLoadBalancer() (multiclusterprovider.MCSLoadBalancer, bool) {
148 | return p, true
149 | }
150 |
151 | // GetMCSLoadBalancer is a stub implementation of MCSLoadBalancer.GetMCSLoadBalancer.
152 | func (p *Provider) GetMCSLoadBalancer(_ context.Context, mcs *networkingv1alpha1.MultiClusterService) (status *corev1.LoadBalancerStatus, exist bool, err error) {
153 | p.addCall("get")
154 | namespacedName := types.NamespacedName{Namespace: mcs.Namespace, Name: mcs.Name}.String()
155 | for name := range p.MCSBalancers {
156 | if name == namespacedName {
157 | klog.V(4).Infof("can get Balancer with mcs %s", namespacedName)
158 | return &corev1.LoadBalancerStatus{}, true, nil
159 | }
160 | }
161 |
162 | klog.V(4).Infof("can not get Balancer with mcs %s", namespacedName)
163 | return nil, false, nil
164 | }
165 |
166 | // EnsureMCSLoadBalancer is a stub implementation of MCSLoadBalancer.EnsureMCSLoadBalancer.
167 | func (p *Provider) EnsureMCSLoadBalancer(_ context.Context, mcs *networkingv1alpha1.MultiClusterService) (status *corev1.LoadBalancerStatus, err error) {
168 | p.addCall("create")
169 | if p.MCSBalancers == nil {
170 | p.MCSBalancers = make(map[string]MCSBalancer)
171 | }
172 |
173 | namespacedName := types.NamespacedName{Namespace: mcs.Namespace, Name: mcs.Name}.String()
174 | _, exist := p.MCSBalancers[namespacedName]
175 | if exist {
176 | klog.V(4).Infof("there already exist a LoadBalance with mcs %s", namespacedName)
177 | return nil, fmt.Errorf("there already exist a LoadBalancer")
178 | }
179 |
180 | mcsCopy := mcs.DeepCopy()
181 | p.MCSBalancers[namespacedName] = MCSBalancer{
182 | Name: mcs.Name,
183 | Namespace: mcs.Namespace,
184 | MCS: mcsCopy,
185 | }
186 | klog.V(4).Infof("success to create a LoadBalance with mcs %s", namespacedName)
187 | return &corev1.LoadBalancerStatus{
188 | Ingress: []corev1.LoadBalancerIngress{
189 | {IP: "1.2.3.4"}, // construct a random IP address and return it.
190 | },
191 | }, nil
192 | }
193 |
194 | // UpdateMCSLoadBalancer is a stub implementation of MCSLoadBalancer.UpdateMCSLoadBalancer.
195 | func (p *Provider) UpdateMCSLoadBalancer(_ context.Context, mcs *networkingv1alpha1.MultiClusterService) (status *corev1.LoadBalancerStatus, err error) {
196 | p.addCall("update")
197 | namespacedName := types.NamespacedName{Namespace: mcs.Namespace, Name: mcs.Name}.String()
198 | lb, exist := p.MCSBalancers[namespacedName]
199 | if !exist {
200 | klog.V(4).Infof("there not exist a LoadBalance with mcs %s", namespacedName)
201 | return nil, fmt.Errorf("there not exist a LoadBalancer")
202 | }
203 |
204 | lb.MCS = mcs.DeepCopy()
205 | p.MCSBalancers[namespacedName] = lb // write the updated copy back; lb is a copy of the map value
206 | klog.V(4).Infof("success to update the LoadBalance with mcs %s", namespacedName)
207 | return &corev1.LoadBalancerStatus{
208 | Ingress: []corev1.LoadBalancerIngress{
209 | {IP: "1.2.3.5"}, // construct a random IP address and return it.
210 | },
211 | }, nil
212 | }
213 |
214 | // EnsureMCSLoadBalancerDeleted is a stub implementation of MCSLoadBalancer.EnsureMCSLoadBalancerDeleted.
215 | func (p *Provider) EnsureMCSLoadBalancerDeleted(_ context.Context, mcs *networkingv1alpha1.MultiClusterService) error {
216 | p.addCall("delete")
217 | namespacedName := types.NamespacedName{Namespace: mcs.Namespace, Name: mcs.Name}.String()
218 | delete(p.MCSBalancers, namespacedName)
219 | 	klog.V(4).Infof("successfully deleted the LoadBalancer for mcs %s", namespacedName)
220 | return nil
221 | }
222 |
223 | func (p *Provider) addCall(desc string) {
224 | time.Sleep(p.RequestDelay)
225 |
226 | p.addCallLock.Lock()
227 | defer p.addCallLock.Unlock()
228 | p.Calls = append(p.Calls, desc)
229 | }
230 |
231 | // ClearCalls clears internal record of method calls to this Provider.
232 | func (p *Provider) ClearCalls() {
233 | p.Calls = []string{}
234 | }
235 |
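A minimal test sketch, assuming it lives in a fake_test package alongside the fake provider shown above (the test name and assertions are illustrative), showing how the fake records calls and rejects duplicate MCS load balancers:

package fake_test

import (
	"context"
	"testing"

	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/karmada-io/multicluster-cloud-provider/fake"
)

func TestEnsureMCSLoadBalancer(t *testing.T) {
	p := &fake.Provider{Name: "fake"}
	mcs := &networkingv1alpha1.MultiClusterService{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo"},
	}

	// The first call creates the balancer and returns a placeholder ingress IP.
	if _, err := p.EnsureMCSLoadBalancer(context.TODO(), mcs); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// A second call must fail because the balancer already exists.
	if _, err := p.EnsureMCSLoadBalancer(context.TODO(), mcs); err == nil {
		t.Fatal("expected an 'already exists' error")
	}
	// Every method invocation is recorded, so tests can assert on the call sequence.
	if len(p.Calls) != 2 {
		t.Fatalf("expected 2 recorded calls, got %d", len(p.Calls))
	}
}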
--------------------------------------------------------------------------------
/fake/regist.go:
--------------------------------------------------------------------------------
1 | package fake
2 |
3 | import (
4 | "io"
5 |
6 | multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
7 | )
8 |
9 | func init() {
10 | multiclusterprovider.RegisterMultiClusterProvider(defaultProviderName, func(config io.Reader) (multiclusterprovider.Interface, error) {
11 | return &Provider{Name: defaultProviderName}, nil
12 | })
13 | }
14 |
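For comparison, a hedged sketch of how an out-of-tree provider could hook into the same registration mechanism. The myprovider package and the "my-cloud" name are hypothetical, and a real provider would return its own implementation of multiclusterprovider.Interface rather than reusing the fake:

package myprovider // hypothetical out-of-tree provider package

import (
	"io"

	multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
	"github.com/karmada-io/multicluster-cloud-provider/fake"
)

func init() {
	// "my-cloud" is a placeholder name; presumably this is the value operators
	// pass via --multicluster-provider, and the io.Reader carries the contents
	// of the file given by --cloud-config.
	multiclusterprovider.RegisterMultiClusterProvider("my-cloud", func(_ io.Reader) (multiclusterprovider.Interface, error) {
		// A real provider would construct and return its own type here.
		return &fake.Provider{Name: "my-cloud"}, nil
	})
}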
--------------------------------------------------------------------------------
/go.mod:
--------------------------------------------------------------------------------
1 | module github.com/karmada-io/multicluster-cloud-provider
2 |
3 | go 1.22.12
4 |
5 | require (
6 | github.com/karmada-io/karmada v1.13.0
7 | github.com/pkg/errors v0.9.1
8 | github.com/spf13/cobra v1.8.1
9 | github.com/spf13/pflag v1.0.5
10 | k8s.io/api v0.31.3
11 | k8s.io/apimachinery v0.31.3
12 | k8s.io/client-go v0.31.3
13 | k8s.io/component-base v0.31.3
14 | k8s.io/klog/v2 v2.130.1
15 | k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
16 | sigs.k8s.io/controller-runtime v0.19.1
17 | sigs.k8s.io/mcs-api v0.1.0
18 | )
19 |
20 | require (
21 | github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
22 | github.com/MakeNowJust/heredoc v1.0.0 // indirect
23 | github.com/beorn7/perks v1.0.1 // indirect
24 | github.com/blang/semver/v4 v4.0.0 // indirect
25 | github.com/cespare/xxhash/v2 v2.3.0 // indirect
26 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
27 | github.com/emicklei/go-restful/v3 v3.12.1 // indirect
28 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect
29 | github.com/fsnotify/fsnotify v1.7.0 // indirect
30 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect
31 | github.com/go-logr/logr v1.4.2 // indirect
32 | github.com/go-logr/zapr v1.3.0 // indirect
33 | github.com/go-openapi/jsonpointer v0.20.2 // indirect
34 | github.com/go-openapi/jsonreference v0.20.4 // indirect
35 | github.com/go-openapi/swag v0.22.9 // indirect
36 | github.com/gogo/protobuf v1.3.2 // indirect
37 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
38 | github.com/golang/protobuf v1.5.4 // indirect
39 | github.com/google/gnostic-models v0.6.8 // indirect
40 | github.com/google/go-cmp v0.6.0 // indirect
41 | github.com/google/gofuzz v1.2.0 // indirect
42 | github.com/google/uuid v1.6.0 // indirect
43 | github.com/gorilla/websocket v1.5.0 // indirect
44 | github.com/imdario/mergo v0.3.16 // indirect
45 | github.com/inconshreveable/mousetrap v1.1.0 // indirect
46 | github.com/josharian/intern v1.0.0 // indirect
47 | github.com/json-iterator/go v1.1.12 // indirect
48 | github.com/kr/pretty v0.3.1 // indirect
49 | github.com/kr/text v0.2.0 // indirect
50 | github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
51 | github.com/mailru/easyjson v0.7.7 // indirect
52 | github.com/mitchellh/go-wordwrap v1.0.1 // indirect
53 | github.com/moby/spdystream v0.4.0 // indirect
54 | github.com/moby/term v0.5.0 // indirect
55 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
56 | github.com/modern-go/reflect2 v1.0.2 // indirect
57 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
58 | github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
59 | github.com/prometheus/client_golang v1.19.1 // indirect
60 | github.com/prometheus/client_model v0.6.1 // indirect
61 | github.com/prometheus/common v0.55.0 // indirect
62 | github.com/prometheus/procfs v0.15.1 // indirect
63 | github.com/rogpeppe/go-internal v1.12.0 // indirect
64 | github.com/russross/blackfriday/v2 v2.1.0 // indirect
65 | github.com/x448/float16 v0.8.4 // indirect
66 | go.uber.org/multierr v1.11.0 // indirect
67 | go.uber.org/zap v1.26.0 // indirect
68 | golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
69 | golang.org/x/net v0.34.0 // indirect
70 | golang.org/x/oauth2 v0.21.0 // indirect
71 | golang.org/x/sys v0.29.0 // indirect
72 | golang.org/x/term v0.28.0 // indirect
73 | golang.org/x/text v0.21.0 // indirect
74 | golang.org/x/time v0.5.0 // indirect
75 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
76 | google.golang.org/protobuf v1.34.2 // indirect
77 | gopkg.in/inf.v0 v0.9.1 // indirect
78 | gopkg.in/yaml.v2 v2.4.0 // indirect
79 | gopkg.in/yaml.v3 v3.0.1 // indirect
80 | k8s.io/apiextensions-apiserver v0.31.3 // indirect
81 | k8s.io/apiserver v0.31.3 // indirect
82 | k8s.io/cli-runtime v0.31.3 // indirect
83 | k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f // indirect
84 | k8s.io/kubectl v0.31.3 // indirect
85 | sigs.k8s.io/cluster-api v1.7.1 // indirect
86 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
87 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
88 | sigs.k8s.io/yaml v1.4.0 // indirect
89 | )
90 |
--------------------------------------------------------------------------------
/hack/build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | # This script builds go components.
8 | # You can set the platform to build with BUILD_PLATFORMS, with format: `<os>/<arch>`
9 | # And binaries will be put in `_output/bin/<os>/<arch>/`
10 | #
11 | # Usage:
12 | #   hack/build.sh <target>
13 | # Args:
14 | # $1: target to build
15 | # Environments:
16 | # BUILD_PLATFORMS: platforms to build. You can set one or more platforms separated by comma.
17 | # e.g.: linux/amd64,linux/arm64
18 | # LDFLAGS pass to the `-ldflags` parameter of go build
19 | # Examples:
20 | # hack/build.sh multicluster-provider-fake
21 | # BUILD_PLATFORMS=linux/amd64,linux/arm64 hack/build.sh multicluster-provider-fake
22 |
23 | REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
24 | source "${REPO_ROOT}/hack/util.sh"
25 |
26 | LDFLAGS="$(util::version_ldflags) ${LDFLAGS:-}"
27 |
28 | function build_binary() {
29 | local -r target=$1
30 |
31 | IFS="," read -ra platforms <<< "${BUILD_PLATFORMS:-}"
32 | if [[ ${#platforms[@]} -eq 0 ]]; then
33 | platforms=("$(util:host_platform)")
34 | fi
35 |
36 | for platform in "${platforms[@]}"; do
37 | echo "!!! Building ${target} for ${platform}:"
38 | build_binary_for_platform "${target}" "${platform}"
39 | done
40 | }
41 |
42 | function build_binary_for_platform() {
43 | local -r target=$1
44 | local -r platform=$2
45 | local -r os=${platform%/*}
46 | local -r arch=${platform##*/}
47 |
48 | local gopkg="${PROVIDER_GO_PACKAGE}/$(util::get_target_source $target)"
49 | set -x
50 | CGO_ENABLED=0 GOOS=${os} GOARCH=${arch} go build \
51 | -ldflags "${LDFLAGS:-}" \
52 | -o "_output/bin/${platform}/$target" \
53 | "${gopkg}"
54 | set +x
55 | }
56 |
57 | build_binary "$@"
58 |
--------------------------------------------------------------------------------
/hack/deploy-provider.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -o errexit
3 | set -o nounset
4 | set -o pipefail
5 |
6 | REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
7 | source "${REPO_ROOT}"/hack/util.sh
8 |
9 | # step0: define variables
10 | KARMADA_SYSTEM_NAMESPACE="karmada-system"
11 | KUBECONFIG_PATH=${KUBECONFIG_PATH:-"${HOME}/.kube"}
12 | MAIN_KUBECONFIG=${MAIN_KUBECONFIG:-"${KUBECONFIG_PATH}/karmada.config"}
13 | HOST_CLUSTER_NAME=${HOST_CLUSTER_NAME:-"karmada-host"}
14 | KARMADA_APISERVER=${KARMADA_APISERVER:-"karmada-apiserver"}
15 |
16 | export KUBECONFIG="${MAIN_KUBECONFIG}"
17 |
18 | # step1: make image
19 | export VERSION="latest"
20 | export REGISTRY="docker.io/karmada"
21 | make image-multicluster-provider-fake GOOS="linux" --directory="${REPO_ROOT}"
22 |
23 | # step2: load image
24 | kind load docker-image "${REGISTRY}/multicluster-provider-fake:${VERSION}" --name="${HOST_CLUSTER_NAME}"
25 |
26 | # step3: create multicluster-provider-config secret to access karmada-apiserver
27 | kubectl --context="${HOST_CLUSTER_NAME}" create secret generic multicluster-provider-config --from-file=karmada.config="${MAIN_KUBECONFIG}" -n "${KARMADA_SYSTEM_NAMESPACE}"
28 |
29 | # step4: deploy multicluster-provider-fake
30 | kubectl --context="${HOST_CLUSTER_NAME}" apply -f "${REPO_ROOT}/artifacts/deploy/multicluster-provider-fake.yaml"
31 | util::wait_pod_ready "${HOST_CLUSTER_NAME}" multicluster-provider-fake "${KARMADA_SYSTEM_NAMESPACE}"
32 |
33 | # step5: deploy ingressclass-fake
34 | kubectl --context="${KARMADA_APISERVER}" apply -f "${REPO_ROOT}/artifacts/deploy/ingressclass-fake.yaml"
35 |
--------------------------------------------------------------------------------
/hack/docker.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | # This script holds docker related functions.
8 | # You can set the platform to build with BUILD_PLATFORMS, with format: `<os>/<arch>`
9 | # When `OUTPUT_TYPE=docker` is set, `BUILD_PLATFORMS` cannot be set with multi platforms.
10 | # See: https://github.com/docker/buildx/issues/59
11 | #
12 | # Usage:
13 | #   hack/docker.sh <target>
14 | # Args:
15 | # $1: target to build
16 | # Environments:
17 | # BUILD_PLATFORMS: platforms to build. You can set one or more platforms separated by comma.
18 | # e.g.: linux/amd64,linux/arm64
19 | # OUTPUT_TYPE Destination to save image(`docker`/`registry`/`local,dest=path`, default is `docker`).
20 | # REGISTRY image registry
21 | # VERSION image version
22 | # DOCKER_BUILD_ARGS additional arguments to the docker build command
23 | # Examples:
24 | # hack/docker.sh multicluster-provider-fake
25 | # BUILD_PLATFORMS=linux/amd64 hack/docker.sh multicluster-provider-fake
26 | # OUTPUT_TYPE=registry BUILD_PLATFORMS=linux/amd64,linux/arm64 hack/docker.sh multicluster-provider-fake
27 | # DOCKER_BUILD_ARGS="--build-arg https_proxy=${https_proxy}" hack/docker.sh multicluster-provider-fake
28 |
29 | REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
30 | source "${REPO_ROOT}/hack/util.sh"
31 |
32 | REGISTRY=${REGISTRY:-"docker.io/karmada"}
33 | VERSION=${VERSION:="latest"}
34 | DOCKER_BUILD_ARGS=${DOCKER_BUILD_ARGS:-}
35 |
36 | function build_images() {
37 | local -r target=$1
38 | local -r output_type=${OUTPUT_TYPE:-docker}
39 | local platforms="${BUILD_PLATFORMS:-"$(util:host_platform)"}"
40 |
41 | # Preferentially use `docker build`. If we are building multi platform,
42 | # or cross building, change to `docker buildx build`
43 | cross=$(isCross "${platforms}")
44 | if [[ "${cross}" == "true" ]]; then
45 | build_cross_image "${output_type}" "${target}" "${platforms}"
46 | else
47 | build_local_image "${output_type}" "${target}" "${platforms}"
48 | fi
49 | }
50 |
51 | function build_local_image() {
52 | local -r output_type=$1
53 | local -r target=$2
54 | local -r platform=$3
55 |
56 | local -r image_name="${REGISTRY}/${target}:${VERSION}"
57 |
58 | echo "Building image for ${platform}: ${image_name}"
59 | set -x
60 | docker build --build-arg BINARY="${target}" \
61 | ${DOCKER_BUILD_ARGS} \
62 | --tag "${image_name}" \
63 | --file "${REPO_ROOT}/cluster/images/Dockerfile" \
64 | "${REPO_ROOT}/_output/bin/${platform}"
65 | set +x
66 |
67 | if [[ "$output_type" == "registry" ]]; then
68 | docker push "${image_name}"
69 | fi
70 | }
71 |
72 | function build_cross_image() {
73 | local -r output_type=$1
74 | local -r target=$2
75 | local -r platforms=$3
76 |
77 | local -r image_name="${REGISTRY}/${target}:${VERSION}"
78 |
79 | echo "Cross building image for ${platforms}: ${image_name}"
80 | set -x
81 | docker buildx build --output=type="${output_type}" \
82 | --platform "${platforms}" \
83 | --build-arg BINARY="${target}" \
84 | ${DOCKER_BUILD_ARGS} \
85 | --tag "${image_name}" \
86 | --file "${REPO_ROOT}/cluster/images/buildx.Dockerfile" \
87 | "${REPO_ROOT}/_output/bin"
88 | set +x
89 | }
90 |
91 | function isCross() {
92 | local platforms=$1
93 |
94 | IFS="," read -ra platform_array <<< "${platforms}"
95 | if [[ ${#platform_array[@]} -ne 1 ]]; then
96 | echo true
97 | return
98 | fi
99 |
100 | local -r arch=${platforms##*/}
101 | if [[ "$arch" == $(go env GOHOSTARCH) ]]; then
102 | echo false
103 | else
104 | echo true
105 | fi
106 | }
107 |
108 | build_images "$@"
109 |
--------------------------------------------------------------------------------
/hack/util.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | # This script holds common bash variables and utility functions.
8 |
9 | PROVIDER_GO_PACKAGE="github.com/karmada-io/multicluster-cloud-provider"
10 |
11 | KARMADA_TARGET_SOURCE=(
12 | multicluster-provider-fake=cmd/controller-manager
13 | )
14 |
15 | function util::get_target_source() {
16 | local target=$1
17 | for s in "${KARMADA_TARGET_SOURCE[@]}"; do
18 | if [[ "$s" == ${target}=* ]]; then
19 | echo "${s##${target}=}"
20 | return
21 | fi
22 | done
23 | }
24 |
25 | function util::version_ldflags() {
26 | # Git information
27 | GIT_VERSION=0000
28 | GIT_COMMIT_HASH=$(git rev-parse HEAD)
29 | if git_status=$(git status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
30 | GIT_TREESTATE="clean"
31 | else
32 | GIT_TREESTATE="dirty"
33 | fi
34 | BUILDDATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
35 | LDFLAGS="-X github.com/karmada-io/multicluster-cloud-provider/pkg/version.gitVersion=${GIT_VERSION} \
36 |              -X github.com/karmada-io/multicluster-cloud-provider/pkg/version.gitCommit=${GIT_COMMIT_HASH} \
37 | -X github.com/karmada-io/multicluster-cloud-provider/pkg/version.gitTreeState=${GIT_TREESTATE} \
38 | -X github.com/karmada-io/multicluster-cloud-provider/pkg/version.buildDate=${BUILDDATE}"
39 | echo $LDFLAGS
40 | }
41 |
42 | function util:host_platform() {
43 | echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
44 | }
45 |
46 | # util::wait_pod_ready waits for pod state becomes ready until timeout.
47 | # Parameters:
48 | # - $1: k8s context name, such as "karmada-apiserver"
49 | # - $2: pod label, such as "app=etcd"
50 | # - $3: pod namespace, such as "karmada-system"
51 | # Note: the wait timeout is fixed at 30s.
52 | function util::wait_pod_ready() {
53 | local context_name=$1
54 | local pod_label=$2
55 | local pod_namespace=$3
56 |
57 |   echo "waiting for the $pod_label pod(s) to be ready..."
58 | set +e
59 | util::kubectl_with_retry --context="$context_name" wait --for=condition=Ready --timeout=30s pods -l app=${pod_label} -n ${pod_namespace}
60 | ret=$?
61 | set -e
62 | if [ $ret -ne 0 ];then
63 | echo "kubectl describe info:"
64 | kubectl --context="$context_name" describe pod -l app=${pod_label} -n ${pod_namespace}
65 | echo "kubectl logs info:"
66 | kubectl --context="$context_name" logs -l app=${pod_label} -n ${pod_namespace}
67 | fi
68 | return ${ret}
69 | }
70 |
71 | # util::kubectl_with_retry retries when the kubectl command fails, to
72 | # tolerate failures that may happen before the pod is created by a StatefulSet/Deployment.
73 | function util::kubectl_with_retry() {
74 | local ret=0
75 | for i in {1..10}; do
76 | kubectl "$@"
77 | ret=$?
78 | if [[ ${ret} -ne 0 ]]; then
79 | echo "kubectl $@ failed, retrying(${i} times)"
80 | sleep 1
81 | continue
82 | else
83 | return 0
84 | fi
85 | done
86 |
87 | echo "kubectl $@ failed"
88 | kubectl "$@"
89 | return ${ret}
90 | }
91 |
92 | function util::cmd_exist {
93 | local CMD=$(command -v ${1})
94 | if [[ ! -x ${CMD} ]]; then
95 | return 1
96 | fi
97 | return 0
98 | }
99 |
--------------------------------------------------------------------------------
/hack/verify-all.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
8 |
9 | # Show progress
10 | set -x
11 |
12 | # The order is determined by two factors:
13 | # (1) Items with shorter execution time should be executed first.
14 | # (2) Items more likely to fail should be executed first.
15 | bash "$REPO_ROOT/hack/verify-staticcheck.sh"
16 |
--------------------------------------------------------------------------------
/hack/verify-staticcheck.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | REPO_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
8 | GOLANGCI_LINT_VER="v1.55.2"
9 |
10 | cd "${REPO_ROOT}"
11 | source "hack/util.sh"
12 |
13 | if util::cmd_exist golangci-lint ; then
14 | echo "Using golangci-lint version:"
15 | golangci-lint version
16 | else
17 | echo "Installing golangci-lint ${GOLANGCI_LINT_VER}"
18 | # https://golangci-lint.run/usage/install/#other-ci
19 | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin ${GOLANGCI_LINT_VER}
20 | fi
21 |
22 | if golangci-lint run; then
23 | echo 'Congratulations! All Go source files have passed staticcheck.'
24 | else
25 | echo # print one empty line, separate from warning messages.
26 | echo 'Please review the above warnings.'
27 | echo 'If the above warnings do not make sense, feel free to file an issue.'
28 | exit 1
29 | fi
30 |
--------------------------------------------------------------------------------
/options/options.go:
--------------------------------------------------------------------------------
1 | package options
2 |
3 | import (
4 | "time"
5 |
6 | "github.com/karmada-io/karmada/pkg/sharedcli/profileflag"
7 | "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
8 | "github.com/spf13/pflag"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/client-go/tools/leaderelection/resourcelock"
11 | componentbaseconfig "k8s.io/component-base/config"
12 | )
13 |
14 | const (
15 | defaultBindAddress = "0.0.0.0"
16 | defaultPort = 10368
17 | )
18 |
19 | const (
20 | // NamespaceKarmadaSystem is the karmada system namespace.
21 | NamespaceKarmadaSystem = "karmada-system"
22 | )
23 |
24 | var (
25 | defaultElectionLeaseDuration = metav1.Duration{Duration: 15 * time.Second}
26 | defaultElectionRenewDeadline = metav1.Duration{Duration: 10 * time.Second}
27 | defaultElectionRetryPeriod = metav1.Duration{Duration: 2 * time.Second}
28 | )
29 |
30 | // MultiClusterControllerManagerOptions contains everything necessary to create and run multicluster-provider.
31 | type MultiClusterControllerManagerOptions struct {
32 | 	// Name is the name of the multicluster provider.
33 | Name string
34 | // CloudConfigFile is the path to the multicluster provider configuration file.
35 | CloudConfigFile string
36 | // ProviderClassName is the name of the ingress class this provider satisfies.
37 | ProviderClassName string
38 | // LeaderElection defines the configuration of leader election client.
39 | LeaderElection componentbaseconfig.LeaderElectionConfiguration
40 | // BindAddress is the IP address on which to listen for the --secure-port port.
41 | BindAddress string
42 | // SecurePort is the port that the server serves at.
43 | 	// Note: We hope to support HTTPS in the future once controller-runtime provides the functionality.
44 | SecurePort int
45 | // KubeAPIQPS is the QPS to use while talking with karmada-apiserver.
46 | KubeAPIQPS float32
47 | // KubeAPIBurst is the burst to allow while talking with karmada-apiserver.
48 | KubeAPIBurst int
49 | // ResyncPeriod is the base frequency the informers are resynced.
50 | // Defaults to 0, which means the created informer will never do resyncs.
51 | ResyncPeriod metav1.Duration
52 | // MetricsBindAddress is the TCP address that the controller should bind to
53 | // for serving prometheus metrics.
54 | // It can be set to "0" to disable the metrics serving.
55 | // Defaults to ":8080".
56 | MetricsBindAddress string
57 |
58 | RateLimiterOpts ratelimiterflag.Options
59 | ProfileOpts profileflag.Options
60 | }
61 |
62 | // NewClusterControllerManagerOptions builds a MultiClusterControllerManagerOptions with default leader-election settings.
63 | func NewClusterControllerManagerOptions() *MultiClusterControllerManagerOptions {
64 | return &MultiClusterControllerManagerOptions{
65 | LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
66 | LeaderElect: true,
67 | ResourceLock: resourcelock.LeasesResourceLock,
68 | ResourceNamespace: NamespaceKarmadaSystem,
69 | ResourceName: "multicluster-controller-manager",
70 | },
71 | }
72 | }
73 |
74 | // AddFlags adds flags to the specified FlagSet.
75 | func (o *MultiClusterControllerManagerOptions) AddFlags(flags *pflag.FlagSet) {
76 | flags.StringVar(&o.Name, "multicluster-provider", o.Name,
77 | "The provider for multicluster services. Empty for no provider.")
78 | flags.StringVar(&o.CloudConfigFile, "cloud-config", o.CloudConfigFile,
79 | "The path to the multicluster provider configuration file. Empty for no configuration file.")
80 | flags.StringVar(&o.ProviderClassName, "provider-ingress-class", o.ProviderClassName,
81 | "The name of the ingress class this provider satisfies. The .spec.controller value of the IngressClass "+
82 | 		"referenced in a MultiClusterIngress object should be the same value specified here.")
83 |
84 | flags.BoolVar(&o.LeaderElection.LeaderElect, "leader-elect", true, "Start a leader election "+
85 | "client and gain leadership before executing the main loop. Enable this when running replicated components "+
86 | "for high availability.")
87 | flags.StringVar(&o.LeaderElection.ResourceNamespace, "leader-elect-resource-namespace", NamespaceKarmadaSystem,
88 | "The namespace of resource object that is used for locking during leader election.")
89 | flags.DurationVar(&o.LeaderElection.LeaseDuration.Duration, "leader-elect-lease-duration",
90 | defaultElectionLeaseDuration.Duration, ""+
91 | "The duration that non-leader candidates will wait after observing a leadership "+
92 | "renewal until attempting to acquire leadership of a led but unrenewed leader "+
93 | "slot. This is effectively the maximum duration that a leader can be stopped "+
94 | "before it is replaced by another candidate. This is only applicable if leader "+
95 | "election is enabled.")
96 | flags.DurationVar(&o.LeaderElection.RenewDeadline.Duration, "leader-elect-renew-deadline",
97 | defaultElectionRenewDeadline.Duration, ""+
98 | "The interval between attempts by the acting master to renew a leadership slot "+
99 | "before it stops leading. This must be less than or equal to the lease duration. "+
100 | "This is only applicable if leader election is enabled.")
101 | flags.DurationVar(&o.LeaderElection.RetryPeriod.Duration, "leader-elect-retry-period",
102 | defaultElectionRetryPeriod.Duration, ""+
103 | "The duration the clients should wait between attempting acquisition and renewal "+
104 | "of a leadership. This is only applicable if leader election is enabled.")
105 |
106 | flags.StringVar(&o.BindAddress, "bind-address", defaultBindAddress, "The IP address on which to listen "+
107 | "for the --secure-port port.")
108 | flags.IntVar(&o.SecurePort, "secure-port", defaultPort, "The secure port on which to serve HTTPS.")
109 | flags.Float32Var(&o.KubeAPIQPS, "kube-api-qps", 40.0, "QPS to use while talking with "+
110 | 		"karmada-apiserver. Doesn't cover events and node heartbeat APIs whose rate limiting is controlled by "+
111 | "a different set of flags.")
112 | flags.IntVar(&o.KubeAPIBurst, "kube-api-burst", 60, "Burst to use while talking with "+
113 | 		"karmada-apiserver. Doesn't cover events and node heartbeat APIs whose rate limiting is controlled by "+
114 | "a different set of flags.")
115 | flags.DurationVar(&o.ResyncPeriod.Duration, "resync-period", 0,
116 | "Base frequency the informers are resynced.")
117 | flags.StringVar(&o.MetricsBindAddress, "metrics-bind-address", ":8080",
118 | "The TCP address that the controller should bind to for serving prometheus "+
119 | "metrics(e.g. 127.0.0.1:8080, :8080). It can be set to \"0\" to disable the metrics serving.")
120 |
121 | o.RateLimiterOpts.AddFlags(flags)
122 | o.ProfileOpts.AddFlags(flags)
123 | }
124 |
--------------------------------------------------------------------------------
/options/validation.go:
--------------------------------------------------------------------------------
1 | package options
2 |
3 | import (
4 | "k8s.io/apimachinery/pkg/util/validation/field"
5 | )
6 |
7 | // Validate checks MultiClusterControllerManagerOptions and returns a list of found errors.
8 | func (o *MultiClusterControllerManagerOptions) Validate() field.ErrorList {
9 | errs := field.ErrorList{}
10 | newPath := field.NewPath("Options")
11 |
12 | if len(o.Name) == 0 {
13 | errs = append(errs, field.Invalid(newPath.Child("Name"), o.Name, "--multicluster-provider cannot be empty"))
14 | }
15 | if len(o.ProviderClassName) == 0 {
16 | errs = append(errs, field.Invalid(newPath.Child("ProviderClassName"), o.ProviderClassName, "--provider-ingress-class cannot be empty"))
17 | }
18 |
19 | return errs
20 | }
21 |
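A small sketch, under the assumption of a standalone main wired with pflag (the real entry point lives in cmd/controller-manager and uses cobra), of how these options are constructed, bound to flags, and validated:

package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"

	"github.com/karmada-io/multicluster-cloud-provider/options"
)

func main() {
	opts := options.NewClusterControllerManagerOptions()

	fs := pflag.NewFlagSet("multicluster-controller-manager", pflag.ExitOnError)
	opts.AddFlags(fs)
	// Example invocation (placeholder values):
	//   --multicluster-provider=fake --provider-ingress-class=fake
	_ = fs.Parse(os.Args[1:]) // ExitOnError makes Parse exit on failure, so the error can be ignored.

	if errs := opts.Validate(); len(errs) != 0 {
		fmt.Fprintln(os.Stderr, errs.ToAggregate())
		os.Exit(1)
	}
	fmt.Printf("starting with provider %q, listening on %s:%d\n", opts.Name, opts.BindAddress, opts.SecurePort)
}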
--------------------------------------------------------------------------------
/pkg/controllers/context/context.go:
--------------------------------------------------------------------------------
1 | package context
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
7 | "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
8 | "k8s.io/apimachinery/pkg/util/sets"
9 | "k8s.io/client-go/dynamic"
10 | "k8s.io/klog/v2"
11 | controllerruntime "sigs.k8s.io/controller-runtime"
12 |
13 | multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
14 | )
15 |
16 | // Options defines all the parameters required by our controllers.
17 | type Options struct {
18 | // Controllers contains all controller names.
19 | Controllers []string
20 | // RateLimiterOptions contains the options for rate limiter.
21 | RateLimiterOptions ratelimiterflag.Options
22 | }
23 |
24 | // Context defines the context object for controller.
25 | type Context struct {
26 | Context context.Context
27 | Mgr controllerruntime.Manager
28 | CloudProvider multiclusterprovider.Interface
29 | Opts Options
30 | DynamicClientSet dynamic.Interface
31 | InformerManager genericmanager.SingleClusterInformerManager
32 | ProviderClassName string
33 | }
34 |
35 | // IsControllerEnabled checks whether the specified controller is enabled.
36 | func (c Context) IsControllerEnabled(name string, disabledByDefaultControllers sets.Set[string]) bool {
37 | hasStar := false
38 | for _, ctrl := range c.Opts.Controllers {
39 | if ctrl == name {
40 | return true
41 | }
42 | if ctrl == "-"+name {
43 | return false
44 | }
45 | if ctrl == "*" {
46 | hasStar = true
47 | }
48 | }
49 | // if we get here, there was no explicit choice
50 | if !hasStar {
51 | // nothing on by default
52 | return false
53 | }
54 |
55 | return !disabledByDefaultControllers.Has(name)
56 | }
57 |
58 | // InitFunc is used to launch a particular controller.
59 | // Any error returned will cause the controller process to `Fatal`
60 | // The bool indicates whether the controller was enabled.
61 | type InitFunc func(ctx Context) (enabled bool, err error)
62 |
63 | // Initializers is a public map of named controller groups
64 | type Initializers map[string]InitFunc
65 |
66 | // ControllerNames returns all known controller names
67 | func (i Initializers) ControllerNames() []string {
68 | return sets.StringKeySet(i).List()
69 | }
70 |
71 | // StartControllers starts a set of controllers with a specified ControllerContext
72 | func (i Initializers) StartControllers(ctx Context, controllersDisabledByDefault sets.Set[string]) error {
73 | for controllerName, initFn := range i {
74 | if !ctx.IsControllerEnabled(controllerName, controllersDisabledByDefault) {
75 | klog.Warningf("%q is disabled", controllerName)
76 | continue
77 | }
78 | klog.V(1).Infof("Starting %q", controllerName)
79 | started, err := initFn(ctx)
80 | if err != nil {
81 | klog.Errorf("Error starting %q", controllerName)
82 | return err
83 | }
84 | if !started {
85 | klog.Warningf("Skipping %q", controllerName)
86 | continue
87 | }
88 | klog.Infof("Started %q", controllerName)
89 | }
90 | return nil
91 | }
92 |
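A brief illustration of the enable/disable semantics of IsControllerEnabled; the controller names used here are hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"

	controllerscontext "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/context"
)

func main() {
	ctx := controllerscontext.Context{
		Opts: controllerscontext.Options{
			// Enable everything via "*", but switch "bar" off explicitly.
			Controllers: []string{"*", "-bar"},
		},
	}
	disabledByDefault := sets.New[string]("foo")

	fmt.Println(ctx.IsControllerEnabled("bar", disabledByDefault)) // false: explicitly disabled
	fmt.Println(ctx.IsControllerEnabled("foo", disabledByDefault)) // false: disabled by default and not named explicitly
	fmt.Println(ctx.IsControllerEnabled("baz", disabledByDefault)) // true: covered by "*"
}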
--------------------------------------------------------------------------------
/pkg/controllers/crdinstallation/crd_installation_controller.go:
--------------------------------------------------------------------------------
1 | package crdinstallation
2 |
3 | import (
4 | "context"
5 |
6 | clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
7 | policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
8 | "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
9 | corev1 "k8s.io/api/core/v1"
10 | "k8s.io/apimachinery/pkg/api/equality"
11 | apierrors "k8s.io/apimachinery/pkg/api/errors"
12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
13 | "k8s.io/apimachinery/pkg/types"
14 | "k8s.io/client-go/tools/record"
15 | "k8s.io/klog/v2"
16 | controllerruntime "sigs.k8s.io/controller-runtime"
17 | "sigs.k8s.io/controller-runtime/pkg/builder"
18 | "sigs.k8s.io/controller-runtime/pkg/client"
19 | "sigs.k8s.io/controller-runtime/pkg/controller"
20 | "sigs.k8s.io/controller-runtime/pkg/event"
21 | "sigs.k8s.io/controller-runtime/pkg/handler"
22 | "sigs.k8s.io/controller-runtime/pkg/predicate"
23 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
24 | )
25 |
26 | // ControllerName is the controller name that will be used when reporting events.
27 | const ControllerName = "crd-installation-controller"
28 |
29 | const clusterPropagationPolicyName = "serviceexportpropagation-policy"
30 |
31 | type eventType int
32 |
33 | const (
34 | ensure eventType = iota
35 | remove
36 | )
37 |
38 | // Controller is used to install the ServiceExport CRD in member clusters.
39 | type Controller struct {
40 | client.Client
41 | EventRecorder record.EventRecorder
42 | RateLimiterOptions ratelimiterflag.Options
43 | }
44 |
45 | // Reconcile performs a full reconciliation for the Cluster object and
46 | // installs ServiceExport CRD in the member clusters.
47 | func (r *Controller) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
48 | 	klog.V(4).InfoS("Installing ServiceExport CRD for cluster", "name", req.Name)
49 |
50 | cluster := &clusterv1alpha1.Cluster{}
51 | if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil {
52 | if apierrors.IsNotFound(err) {
53 | 			return r.syncClusterPropagationPolicy(ctx, req.Name, remove) // the cluster object is not populated on NotFound, so use the requested name
54 | }
55 | return controllerruntime.Result{}, err
56 | }
57 |
58 | if !cluster.DeletionTimestamp.IsZero() {
59 | return r.syncClusterPropagationPolicy(ctx, cluster.Name, remove)
60 | }
61 |
62 | return r.syncClusterPropagationPolicy(ctx, cluster.Name, ensure)
63 | }
64 |
65 | func (r *Controller) syncClusterPropagationPolicy(ctx context.Context, clusterName string, t eventType) (controllerruntime.Result, error) {
66 | policy := &policyv1alpha1.ClusterPropagationPolicy{}
67 | err := r.Client.Get(ctx, types.NamespacedName{Name: clusterPropagationPolicyName}, policy)
68 | if err != nil {
69 | if apierrors.IsNotFound(err) {
70 | return r.createClusterPropagationPolicy(ctx)
71 | }
72 | klog.ErrorS(err, "failed to get clusterPropagationPolicy", "name", clusterPropagationPolicyName)
73 | return controllerruntime.Result{}, err
74 | }
75 |
76 | index := 0
77 | clusters := policy.Spec.Placement.ClusterAffinity.ClusterNames
78 | for ; index < len(clusters); index++ {
79 | if clusters[index] == clusterName {
80 | break
81 | }
82 | }
83 |
84 | switch t {
85 | case ensure:
86 | if index < len(clusters) {
87 | 			// the target cluster has already been added to the cpp clusterNames
88 | klog.V(4).InfoS("no need to update clusterPropagationPolicy", "name", clusterPropagationPolicyName)
89 | return controllerruntime.Result{}, nil
90 | }
91 | clusters = append(clusters, clusterName)
92 | case remove:
93 | if index >= len(clusters) {
94 | 			// the target cluster has already been removed from the cpp clusterNames
95 | klog.V(4).InfoS("no need to update clusterPropagationPolicy", "name", clusterPropagationPolicyName)
96 | return controllerruntime.Result{}, nil
97 | }
98 | clusters = append(clusters[:index], clusters[index+1:]...)
99 | }
100 |
101 | policy.Spec.Placement.ClusterAffinity.ClusterNames = clusters
102 | err = r.Client.Update(ctx, policy)
103 | if err != nil {
104 | klog.ErrorS(err, "failed to update clusterPropagationPolicy", "name", clusterPropagationPolicyName)
105 | return controllerruntime.Result{}, err
106 | }
107 | 	klog.V(4).InfoS("successfully updated clusterPropagationPolicy", "name", clusterPropagationPolicyName)
108 | return controllerruntime.Result{}, nil
109 | }
110 |
111 | func (r *Controller) createClusterPropagationPolicy(ctx context.Context) (controllerruntime.Result, error) {
112 | clusters := &clusterv1alpha1.ClusterList{}
113 | err := r.Client.List(ctx, clusters)
114 | if err != nil {
115 | klog.ErrorS(err, "failed to list clusters")
116 | return controllerruntime.Result{}, err
117 | }
118 |
119 | clusterNames := make([]string, len(clusters.Items))
120 | for index, cluster := range clusters.Items {
121 | clusterNames[index] = cluster.Name
122 | }
123 |
124 | policy := clusterPropagationPolicy(clusterNames)
125 | err = r.Client.Create(ctx, policy)
126 | if err != nil {
127 | klog.ErrorS(err, "failed to create clusterPropagationPolicy", "name", clusterPropagationPolicyName)
128 | return controllerruntime.Result{}, err
129 | }
130 | return controllerruntime.Result{}, nil
131 | }
132 |
133 | func clusterPropagationPolicy(clusters []string) *policyv1alpha1.ClusterPropagationPolicy {
134 | return &policyv1alpha1.ClusterPropagationPolicy{
135 | ObjectMeta: metav1.ObjectMeta{
136 | Name: clusterPropagationPolicyName,
137 | },
138 | Spec: policyv1alpha1.PropagationSpec{
139 | ResourceSelectors: []policyv1alpha1.ResourceSelector{
140 | {
141 | APIVersion: "apiextensions.k8s.io/v1",
142 | Kind: "CustomResourceDefinition",
143 | Name: "serviceexports.multicluster.x-k8s.io",
144 | }},
145 | Placement: policyv1alpha1.Placement{
146 | ClusterAffinity: &policyv1alpha1.ClusterAffinity{
147 | ClusterNames: clusters,
148 | },
149 | ClusterTolerations: []corev1.Toleration{
150 | {
151 | Key: clusterv1alpha1.TaintClusterNotReady,
152 | Operator: corev1.TolerationOpExists,
153 | Effect: corev1.TaintEffectNoExecute,
154 | },
155 | {
156 | Key: clusterv1alpha1.TaintClusterUnreachable,
157 | Operator: corev1.TolerationOpExists,
158 | Effect: corev1.TaintEffectNoExecute,
159 | },
160 | },
161 | }}}
162 | }
163 |
164 | // SetupWithManager creates a controller and registers it with the controller manager.
165 | func (r *Controller) SetupWithManager(_ context.Context, mgr controllerruntime.Manager) error {
166 | clusterFilter := predicate.Funcs{
167 | CreateFunc: func(event event.CreateEvent) bool { return true },
168 | UpdateFunc: func(updateEvent event.UpdateEvent) bool {
169 | return !equality.Semantic.DeepEqual(updateEvent.ObjectOld.GetDeletionTimestamp().IsZero(),
170 | updateEvent.ObjectNew.GetDeletionTimestamp().IsZero())
171 | },
172 | DeleteFunc: func(deleteEvent event.DeleteEvent) bool { return true },
173 | GenericFunc: func(genericEvent event.GenericEvent) bool { return false },
174 | }
175 |
176 | cppHandlerFn := handler.MapFunc(
177 | func(ctx context.Context, object client.Object) []reconcile.Request {
178 | 			// return a fictional cluster to trigger a reconcile that recreates the cpp.
179 | return []reconcile.Request{
180 | {NamespacedName: types.NamespacedName{Name: "no-exist-cluster"}},
181 | }
182 | },
183 | )
184 | cppFilter := builder.WithPredicates(predicate.Funcs{
185 | CreateFunc: func(event event.CreateEvent) bool { return false },
186 | UpdateFunc: func(updateEvent event.UpdateEvent) bool { return false },
187 | DeleteFunc: func(deleteEvent event.DeleteEvent) bool {
188 | return deleteEvent.Object.GetName() == clusterPropagationPolicyName
189 | },
190 | GenericFunc: func(genericEvent event.GenericEvent) bool { return false },
191 | })
192 |
193 | return controllerruntime.NewControllerManagedBy(mgr).
194 | For(&clusterv1alpha1.Cluster{}).
195 | Watches(&policyv1alpha1.ClusterPropagationPolicy{},
196 | handler.EnqueueRequestsFromMapFunc(cppHandlerFn), cppFilter).
197 | WithEventFilter(clusterFilter).
198 | WithOptions(controller.Options{
199 | RateLimiter: ratelimiterflag.DefaultControllerRateLimiter[controllerruntime.Request](r.RateLimiterOptions),
200 | }).
201 | Complete(r)
202 | }
203 |
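A hedged sketch of how this controller might be exposed as an InitFunc for the Initializers map defined in pkg/controllers/context; the startCRDInstallationController name is illustrative and the actual wiring in app/controllers.go may differ:

package app // illustrative; the actual registration in app/controllers.go may differ

import (
	controllerscontext "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/context"
	"github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/crdinstallation"
)

// startCRDInstallationController builds the controller from the shared
// controller context and registers it with the controller-runtime manager.
func startCRDInstallationController(ctx controllerscontext.Context) (bool, error) {
	c := &crdinstallation.Controller{
		Client:             ctx.Mgr.GetClient(),
		EventRecorder:      ctx.Mgr.GetEventRecorderFor(crdinstallation.ControllerName),
		RateLimiterOptions: ctx.Opts.RateLimiterOptions,
	}
	if err := c.SetupWithManager(ctx.Context, ctx.Mgr); err != nil {
		return false, err
	}
	return true, nil
}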
--------------------------------------------------------------------------------
/pkg/controllers/indexes/reference_indexer.go:
--------------------------------------------------------------------------------
1 | package indexes
2 |
3 | import (
4 | "context"
5 |
6 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
7 | networkingv1 "k8s.io/api/networking/v1"
8 | "k8s.io/apimachinery/pkg/util/sets"
9 | "sigs.k8s.io/controller-runtime/pkg/client"
10 | )
11 |
12 | const (
13 | // IndexKeyServiceRefName is index key for services referenced by MultiClusterIngress.
14 | IndexKeyServiceRefName = "mci.serviceRef.name"
15 | // IndexKeySecretRefName is index key for secrets referenced by MultiClusterIngress.
16 | IndexKeySecretRefName = "mci.secretRef.name" // #nosec G101
17 | )
18 |
19 | // SetupServiceIndexesForMCI sets up Service indexes for the MultiClusterIngress object.
20 | func SetupServiceIndexesForMCI(ctx context.Context, fieldIndexer client.FieldIndexer) error {
21 | if err := fieldIndexer.IndexField(ctx, &networkingv1alpha1.MultiClusterIngress{}, IndexKeyServiceRefName,
22 | func(object client.Object) []string {
23 | return BuildServiceRefIndexes(object.(*networkingv1alpha1.MultiClusterIngress))
24 | }); err != nil {
25 | return err
26 | }
27 | return nil
28 | }
29 |
30 | // BuildServiceRefIndexes returns the service refs in the MultiClusterIngress object.
31 | func BuildServiceRefIndexes(mci *networkingv1alpha1.MultiClusterIngress) []string {
32 | var backends []networkingv1.IngressBackend
33 | if mci.Spec.DefaultBackend != nil {
34 | backends = append(backends, *mci.Spec.DefaultBackend)
35 | }
36 |
37 | for _, rule := range mci.Spec.Rules {
38 | if rule.HTTP == nil {
39 | continue
40 | }
41 | for _, path := range rule.HTTP.Paths {
42 | backends = append(backends, path.Backend)
43 | }
44 | }
45 |
46 | svcNames := sets.NewString()
47 | for _, backend := range backends {
48 | svcNames.Insert(backend.Service.Name)
49 | }
50 | return svcNames.List()
51 | }
52 |
53 | // SetupSecretIndexesForMCI sets up Secret indexes for the MultiClusterIngress object.
54 | func SetupSecretIndexesForMCI(ctx context.Context, fieldIndexer client.FieldIndexer) error {
55 | if err := fieldIndexer.IndexField(ctx, &networkingv1alpha1.MultiClusterIngress{}, IndexKeySecretRefName,
56 | func(object client.Object) []string {
57 | return BuildSecretRefIndexes(object.(*networkingv1alpha1.MultiClusterIngress))
58 | }); err != nil {
59 | return err
60 | }
61 | return nil
62 | }
63 |
64 | // BuildSecretRefIndexes returns the secret refs in the MultiClusterIngress object.
65 | func BuildSecretRefIndexes(mci *networkingv1alpha1.MultiClusterIngress) []string {
66 | secretNames := sets.NewString()
67 | for _, tls := range mci.Spec.TLS {
68 | secretNames.Insert(tls.SecretName)
69 | }
70 | return secretNames.List()
71 | }
72 |
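A short sketch, assuming mgr is an already-constructed controller-runtime Manager and the setupIndexesAndQuery helper is hypothetical, of registering these field indexes and then using them to list only the MultiClusterIngress objects that reference a given Service:

package app // illustrative snippet; setupIndexesAndQuery is a hypothetical helper

import (
	"context"

	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/indexes"
)

func setupIndexesAndQuery(ctx context.Context, mgr controllerruntime.Manager) error {
	// Register the field indexes once, before the manager starts.
	if err := indexes.SetupServiceIndexesForMCI(ctx, mgr.GetFieldIndexer()); err != nil {
		return err
	}
	if err := indexes.SetupSecretIndexesForMCI(ctx, mgr.GetFieldIndexer()); err != nil {
		return err
	}

	// Once the cache is running, controllers can list only the MCIs whose
	// backends reference the Service named "demo" in the "default" namespace.
	mciList := &networkingv1alpha1.MultiClusterIngressList{}
	return mgr.GetClient().List(ctx, mciList,
		client.InNamespace("default"),
		client.MatchingFields{indexes.IndexKeyServiceRefName: "demo"})
}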
--------------------------------------------------------------------------------
/pkg/controllers/mciservicelocations/mci_service_locations.go:
--------------------------------------------------------------------------------
1 | package mciservicelocations
2 |
3 | import (
4 | "context"
5 | "reflect"
6 | "sort"
7 |
8 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
9 | workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
10 | "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
11 | "github.com/karmada-io/karmada/pkg/util/names"
12 | networkingv1 "k8s.io/api/networking/v1"
13 | apierrors "k8s.io/apimachinery/pkg/api/errors"
14 | "k8s.io/apimachinery/pkg/types"
15 | "k8s.io/apimachinery/pkg/util/sets"
16 | "k8s.io/client-go/util/retry"
17 | "k8s.io/klog/v2"
18 | controllerruntime "sigs.k8s.io/controller-runtime"
19 | "sigs.k8s.io/controller-runtime/pkg/builder"
20 | "sigs.k8s.io/controller-runtime/pkg/client"
21 | "sigs.k8s.io/controller-runtime/pkg/controller"
22 | "sigs.k8s.io/controller-runtime/pkg/event"
23 | "sigs.k8s.io/controller-runtime/pkg/handler"
24 | "sigs.k8s.io/controller-runtime/pkg/predicate"
25 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
26 |
27 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/indexes"
28 | )
29 |
30 | // Controller is used to maintain information about the clusters in which
31 | // the Service backend of the MultiClusterIngress resource resides.
32 | type Controller struct {
33 | client.Client
34 | RateLimiterOptions ratelimiterflag.Options
35 | }
36 |
37 | // Reconcile performs a full reconciliation for the object referred to by the Request.
38 | // The Controller will requeue the Request to be processed again if an error is non-nil or
39 | // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
40 | func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
41 | klog.V(4).Infof("Reconciling MultiClusterIngress %s", req.NamespacedName.String())
42 |
43 | mci := &networkingv1alpha1.MultiClusterIngress{}
44 | if err := c.Client.Get(ctx, req.NamespacedName, mci); err != nil {
45 | if apierrors.IsNotFound(err) {
46 | return controllerruntime.Result{}, nil
47 | }
48 | 		klog.ErrorS(err, "failed to get multiClusterIngress object", "NamespacedName", req.NamespacedName.String())
49 | return controllerruntime.Result{}, err
50 | }
51 |
52 | svcLocations, err := c.calculateServiceLocations(ctx, mci)
53 | if err != nil {
54 | return controllerruntime.Result{}, err
55 | }
56 |
57 | mci = mci.DeepCopy()
58 | err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
59 | if reflect.DeepEqual(svcLocations, mci.Status.ServiceLocations) {
60 | return nil
61 | }
62 | mci.Status.ServiceLocations = svcLocations
63 | updateErr := c.Client.Status().Update(ctx, mci)
64 | if updateErr == nil {
65 | return nil
66 | }
67 |
68 | updatedMCI := &networkingv1alpha1.MultiClusterIngress{}
69 | err = c.Client.Get(ctx, req.NamespacedName, updatedMCI)
70 | if err == nil {
71 | mci = updatedMCI.DeepCopy()
72 | } else {
73 | klog.Errorf("Failed to get updated multiClusterIngress(%s): %v", req.NamespacedName.String(), err)
74 | }
75 | return updateErr
76 | })
77 | if err != nil {
78 | klog.Errorf("Failed to sync multiClusterIngress(%s) service locations: %v", req.NamespacedName.String(), err)
79 | return controllerruntime.Result{}, err
80 | }
81 | 	klog.V(4).Infof("Successfully synced multiClusterIngress(%s) service locations", req.NamespacedName.String())
82 | return controllerruntime.Result{}, nil
83 | }
84 |
85 | func (c *Controller) calculateServiceLocations(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) ([]networkingv1alpha1.ServiceLocation, error) {
86 | backendSvcNames := indexes.BuildServiceRefIndexes(mci)
87 | sort.Strings(backendSvcNames)
88 |
89 | var svcLocations []networkingv1alpha1.ServiceLocation
90 | for _, svcName := range backendSvcNames {
91 | svcBinding := &workv1alpha2.ResourceBinding{}
92 | svcRBNamespacedName := types.NamespacedName{
93 | Namespace: mci.Namespace,
94 | Name: names.GenerateBindingName("Service", svcName),
95 | }
96 | err := c.Client.Get(ctx, svcRBNamespacedName, svcBinding)
97 | if err != nil {
98 | if apierrors.IsNotFound(err) {
99 | continue
100 | }
101 | klog.ErrorS(err, "failed to get service's related resourceBinding",
102 | "ResourceBinding", svcRBNamespacedName.String())
103 | return nil, err
104 | }
105 |
106 | svcLocations = append(svcLocations, networkingv1alpha1.ServiceLocation{
107 | Name: svcName,
108 | Clusters: obtainBindingClusters(svcBinding),
109 | })
110 | }
111 | return svcLocations, nil
112 | }
113 |
114 | func obtainBindingClusters(rb *workv1alpha2.ResourceBinding) []string {
115 | clusters := sets.NewString()
116 | for _, cluster := range rb.Spec.Clusters {
117 | clusters.Insert(cluster.Name)
118 | }
119 | for _, requiredBy := range rb.Spec.RequiredBy {
120 | for _, cluster := range requiredBy.Clusters {
121 | clusters.Insert(cluster.Name)
122 | }
123 | }
124 | return clusters.List()
125 | }
126 |
127 | // SetupWithManager creates a controller and registers it with the controller manager.
128 | func (c *Controller) SetupWithManager(mgr controllerruntime.Manager) error {
129 | mciPredicateFuncs := predicate.Funcs{
130 | CreateFunc: func(event event.CreateEvent) bool { return true },
131 | DeleteFunc: func(event event.DeleteEvent) bool { return false },
132 | UpdateFunc: func(event event.UpdateEvent) bool {
133 | oldMCI := event.ObjectOld.(*networkingv1alpha1.MultiClusterIngress)
134 | newMCI := event.ObjectNew.(*networkingv1alpha1.MultiClusterIngress)
135 | var oldDefaultBackend, newDefaultBackend networkingv1.IngressBackend
136 | if oldMCI.Spec.DefaultBackend != nil {
137 | oldDefaultBackend = *oldMCI.Spec.DefaultBackend
138 | }
139 | if newMCI.Spec.DefaultBackend != nil {
140 | newDefaultBackend = *newMCI.Spec.DefaultBackend
141 | }
142 | return !reflect.DeepEqual(oldDefaultBackend, newDefaultBackend) ||
143 | !reflect.DeepEqual(oldMCI.Spec.Rules, newMCI.Spec.Rules)
144 | },
145 | }
146 |
147 | rbMapFunc := handler.MapFunc(
148 | func(ctx context.Context, object client.Object) []reconcile.Request {
149 | var requests []reconcile.Request
150 |
151 | rb := object.(*workv1alpha2.ResourceBinding)
152 | if rb.Spec.Resource.APIVersion != "v1" || rb.Spec.Resource.Kind != "Service" {
153 | return nil
154 | }
155 |
156 | mciList := &networkingv1alpha1.MultiClusterIngressList{}
157 | if err := c.Client.List(context.Background(), mciList,
158 | client.InNamespace(rb.GetNamespace()),
159 | client.MatchingFields{indexes.IndexKeyServiceRefName: rb.Spec.Resource.Name}); err != nil {
160 | 				klog.Errorf("failed to fetch multiclusteringresses: %v", err)
161 | return nil
162 | }
163 |
164 | for index := range mciList.Items {
165 | mci := &mciList.Items[index]
166 | requests = append(requests, reconcile.Request{
167 | NamespacedName: types.NamespacedName{Namespace: mci.Namespace, Name: mci.Name}})
168 | }
169 | return requests
170 | })
171 |
172 | rbPredicateFuncs := predicate.Funcs{
173 | CreateFunc: func(event event.CreateEvent) bool {
174 | rb := event.Object.(*workv1alpha2.ResourceBinding)
175 | return rb.Spec.Resource.APIVersion == "v1" && rb.Spec.Resource.Kind == "Service"
176 | },
177 | UpdateFunc: func(event event.UpdateEvent) bool {
178 | oldRB := event.ObjectOld.(*workv1alpha2.ResourceBinding)
179 | newRB := event.ObjectNew.(*workv1alpha2.ResourceBinding)
180 | if newRB.Spec.Resource.APIVersion != "v1" || newRB.Spec.Resource.Kind != "Service" {
181 | return false
182 | }
183 | return !reflect.DeepEqual(oldRB.Spec.Clusters, newRB.Spec.Clusters) ||
184 | !reflect.DeepEqual(oldRB.Spec.RequiredBy, newRB.Spec.RequiredBy)
185 | },
186 | DeleteFunc: func(event event.DeleteEvent) bool {
187 | rb := event.Object.(*workv1alpha2.ResourceBinding)
188 | return rb.Spec.Resource.APIVersion == "v1" && rb.Spec.Resource.Kind == "Service"
189 | },
190 | }
191 |
192 | return controllerruntime.NewControllerManagedBy(mgr).
193 | For(&networkingv1alpha1.MultiClusterIngress{}, builder.WithPredicates(mciPredicateFuncs)).
194 | Watches(&workv1alpha2.ResourceBinding{}, handler.EnqueueRequestsFromMapFunc(rbMapFunc), builder.WithPredicates(rbPredicateFuncs)).
195 | WithOptions(controller.Options{RateLimiter: ratelimiterflag.DefaultControllerRateLimiter[controllerruntime.Request](c.RateLimiterOptions)}).
196 | Complete(c)
197 | }
198 |
--------------------------------------------------------------------------------
/pkg/controllers/multiclusteringress/eventhandlers.go:
--------------------------------------------------------------------------------
1 | package multiclusteringress
2 |
3 | import (
4 | "context"
5 | "strings"
6 |
7 | clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
8 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
9 | remedyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1"
10 | corev1 "k8s.io/api/core/v1"
11 | discoveryv1 "k8s.io/api/discovery/v1"
12 | "k8s.io/apimachinery/pkg/api/equality"
13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
14 | "k8s.io/apimachinery/pkg/types"
15 | "k8s.io/client-go/util/workqueue"
16 | "k8s.io/klog/v2"
17 | "sigs.k8s.io/controller-runtime/pkg/client"
18 | "sigs.k8s.io/controller-runtime/pkg/event"
19 | "sigs.k8s.io/controller-runtime/pkg/handler"
20 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
21 |
22 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/indexes"
23 | "github.com/karmada-io/multicluster-cloud-provider/pkg/util"
24 | )
25 |
26 | func newMultiClusterIngressEventHandler(ctx context.Context, client client.Client, providerClassName string) handler.TypedEventHandler[*networkingv1alpha1.MultiClusterIngress, reconcile.Request] {
27 | return &multiClusterIngressEventHandler{
28 | ctx: ctx,
29 | client: client,
30 | ingClassName: providerClassName,
31 | }
32 | }
33 |
34 | var _ handler.TypedEventHandler[*networkingv1alpha1.MultiClusterIngress, reconcile.Request] = (*multiClusterIngressEventHandler)(nil)
35 |
36 | type multiClusterIngressEventHandler struct {
37 | ctx context.Context
38 | client client.Client
39 | ingClassName string
40 | }
41 |
42 | func (h *multiClusterIngressEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*networkingv1alpha1.MultiClusterIngress], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
43 | klog.V(4).Infof("mci(%s/%s) created", e.Object.GetNamespace(), e.Object.GetName())
44 | if !util.CheckIngressClassMatched(h.ctx, h.client, e.Object, h.ingClassName) {
45 | return
46 | }
47 | queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
48 | Namespace: e.Object.GetNamespace(),
49 | Name: e.Object.GetName(),
50 | }})
51 | }
52 |
53 | func (h *multiClusterIngressEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*networkingv1alpha1.MultiClusterIngress], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
54 | mciOld := e.ObjectOld
55 | mciNew := e.ObjectNew
56 | if !util.CheckIngressClassMatched(h.ctx, h.client, mciNew, h.ingClassName) {
57 | return
58 | }
59 |
60 | // We only care about the update events below:
61 | if equality.Semantic.DeepEqual(mciOld.Annotations, mciNew.Annotations) &&
62 | equality.Semantic.DeepEqual(mciOld.Spec, mciNew.Spec) &&
63 | equality.Semantic.DeepEqual(mciOld.DeletionTimestamp.IsZero(), mciNew.DeletionTimestamp.IsZero()) {
64 | return
65 | }
66 |
67 | queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
68 | Namespace: mciNew.Namespace,
69 | Name: mciNew.Name,
70 | }})
71 | }
72 |
73 | func (h *multiClusterIngressEventHandler) Delete(_ context.Context, _ event.TypedDeleteEvent[*networkingv1alpha1.MultiClusterIngress], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
74 | // Since finalizer is added to the multiClusterIngress object,
75 | // the delete event is processed by the update event.
76 | }
77 |
78 | func (h *multiClusterIngressEventHandler) Generic(_ context.Context, e event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
79 | if !util.CheckIngressClassMatched(h.ctx, h.client, e.Object, h.ingClassName) {
80 | return
81 | }
82 | queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
83 | Namespace: e.Object.GetNamespace(),
84 | Name: e.Object.GetName(),
85 | }})
86 | }
87 |
88 | func newServiceEventHandler(mciEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress], client client.Client) handler.TypedEventHandler[*corev1.Service, reconcile.Request] {
89 | return &serviceEventHandler{
90 | mciEventChan: mciEventChan,
91 | client: client,
92 | }
93 | }
94 |
95 | var _ handler.TypedEventHandler[*corev1.Service, reconcile.Request] = (*serviceEventHandler)(nil)
96 |
97 | type serviceEventHandler struct {
98 | mciEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress]
99 | client client.Client
100 | }
101 |
102 | func (h *serviceEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
103 | klog.V(4).Infof("service(%s/%s) created", e.Object.GetNamespace(), e.Object.GetName())
104 | h.enqueueImpactedMCI(e.Object.GetNamespace(), e.Object.GetName())
105 | }
106 |
107 | func (h *serviceEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
108 | svcOld := e.ObjectOld
109 | svcNew := e.ObjectNew
110 |
111 | // We only care about the update events below:
112 | if equality.Semantic.DeepEqual(svcOld.Annotations, svcNew.Annotations) &&
113 | equality.Semantic.DeepEqual(svcOld.Spec, svcNew.Spec) &&
114 | equality.Semantic.DeepEqual(svcOld.DeletionTimestamp.IsZero(), svcNew.DeletionTimestamp.IsZero()) {
115 | return
116 | }
117 |
118 | h.enqueueImpactedMCI(svcNew.Namespace, svcNew.Name)
119 | }
120 |
121 | func (h *serviceEventHandler) Delete(_ context.Context, e event.TypedDeleteEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
122 | h.enqueueImpactedMCI(e.Object.GetNamespace(), e.Object.GetName())
123 | }
124 |
125 | func (h *serviceEventHandler) Generic(_ context.Context, e event.TypedGenericEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
126 | h.enqueueImpactedMCI(e.Object.GetNamespace(), e.Object.GetName())
127 | }
128 |
129 | func (h *serviceEventHandler) enqueueImpactedMCI(svcNamespace, svcName string) {
130 | mciList := &networkingv1alpha1.MultiClusterIngressList{}
131 | if err := h.client.List(context.Background(), mciList,
132 | client.InNamespace(svcNamespace),
133 | client.MatchingFields{indexes.IndexKeyServiceRefName: svcName}); err != nil {
134 | 		klog.Errorf("failed to fetch multiclusteringresses: %v", err)
135 | return
136 | }
137 |
138 | for index := range mciList.Items {
139 | mci := &mciList.Items[index]
140 | h.mciEventChan <- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress]{
141 | Object: mci,
142 | }
143 | }
144 | }
145 |
146 | // derivedServicePrefix is the prefix of the name of a service derived from a ServiceImport.
147 | const derivedServicePrefix = "derived-"
148 |
149 | func newEndpointSlicesEventHandler(svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]) handler.TypedEventHandler[*discoveryv1.EndpointSlice, reconcile.Request] {
150 | return &endpointSlicesEventHandler{
151 | svcEventChan: svcEventChan,
152 | }
153 | }
154 |
155 | var _ handler.TypedEventHandler[*discoveryv1.EndpointSlice, reconcile.Request] = (*endpointSlicesEventHandler)(nil)
156 |
157 | type endpointSlicesEventHandler struct {
158 | svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]
159 | }
160 |
161 | func (h *endpointSlicesEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
162 | klog.V(4).Infof("eps(%s/%s) created", e.Object.GetNamespace(), e.Object.GetName())
163 | h.enqueueImpactedSvc(e.Object)
164 | }
165 |
166 | func (h *endpointSlicesEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
167 | h.enqueueImpactedSvc(e.ObjectNew)
168 | }
169 |
170 | func (h *endpointSlicesEventHandler) Delete(_ context.Context, e event.TypedDeleteEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
171 | h.enqueueImpactedSvc(e.Object)
172 | }
173 |
174 | func (h *endpointSlicesEventHandler) Generic(_ context.Context, _ event.TypedGenericEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
175 | }
176 |
177 | func (h *endpointSlicesEventHandler) enqueueImpactedSvc(obj client.Object) {
178 | svcName, ok := obj.GetLabels()[discoveryv1.LabelServiceName]
179 | if !ok {
180 | klog.Warningf("Can not get the key(%s) with the endpointSlices object(%s/%s)",
181 | discoveryv1.LabelServiceName, obj.GetNamespace(), obj.GetName())
182 | return
183 | }
184 |
185 | h.svcEventChan <- event.TypedGenericEvent[*corev1.Service]{
186 | Object: &corev1.Service{
187 | ObjectMeta: metav1.ObjectMeta{
188 | Namespace: obj.GetNamespace(),
189 | Name: strings.TrimPrefix(svcName, derivedServicePrefix),
190 | }}}
191 | }
192 |
193 | func newSecretEventHandler(mciEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress], client client.Client) handler.TypedEventHandler[*corev1.Secret, reconcile.Request] {
194 | return &secretEventHandler{
195 | mciEventChan: mciEventChan,
196 | client: client,
197 | }
198 | }
199 |
200 | var _ handler.TypedEventHandler[*corev1.Secret, reconcile.Request] = (*secretEventHandler)(nil)
201 |
202 | type secretEventHandler struct {
203 | mciEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress]
204 | client client.Client
205 | }
206 |
207 | func (h *secretEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*corev1.Secret], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
208 | klog.V(4).Infof("secret(%s/%s) created", e.Object.GetNamespace(), e.Object.GetName())
209 | h.enqueueImpactedMCI(e.Object.GetNamespace(), e.Object.GetName())
210 | }
211 |
212 | func (h *secretEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*corev1.Secret], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
213 | secretOld := e.ObjectOld
214 | secretNew := e.ObjectNew
215 |
216 | if equality.Semantic.DeepEqual(secretOld.Annotations, secretNew.Annotations) &&
217 | equality.Semantic.DeepEqual(secretOld.Data, secretNew.Data) &&
218 | equality.Semantic.DeepEqual(secretOld.StringData, secretNew.StringData) {
219 | return
220 | }
221 |
222 | h.enqueueImpactedMCI(secretNew.Namespace, secretNew.Name)
223 | }
224 |
225 | func (h *secretEventHandler) Delete(_ context.Context, e event.TypedDeleteEvent[*corev1.Secret], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
226 | h.enqueueImpactedMCI(e.Object.GetNamespace(), e.Object.GetName())
227 | }
228 |
229 | func (h *secretEventHandler) enqueueImpactedMCI(secretNamespace, secretName string) {
230 | mciList := &networkingv1alpha1.MultiClusterIngressList{}
231 | if err := h.client.List(context.Background(), mciList,
232 | client.InNamespace(secretNamespace),
233 | client.MatchingFields{indexes.IndexKeySecretRefName: secretName}); err != nil {
234 | klog.Errorf("failed to fetch multiclusteringresses")
235 | return
236 | }
237 |
238 | for index := range mciList.Items {
239 | mci := &mciList.Items[index]
240 | h.mciEventChan <- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress]{
241 | Object: mci,
242 | }
243 | }
244 | }
245 |
246 | func (h *secretEventHandler) Generic(_ context.Context, _ event.TypedGenericEvent[*corev1.Secret], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
247 |
248 | }
249 |
250 | func newClusterEventHandler(mciEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress], client client.Client) handler.TypedEventHandler[*clusterv1alpha1.Cluster, reconcile.Request] {
251 | return &clusterEventHandler{
252 | client: client,
253 | mciEventChan: mciEventChan,
254 | }
255 | }
256 |
257 | var _ handler.TypedEventHandler[*clusterv1alpha1.Cluster, reconcile.Request] = (*clusterEventHandler)(nil)
258 |
259 | type clusterEventHandler struct {
260 | client client.Client
261 | mciEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress]
262 | }
263 |
264 | func (h *clusterEventHandler) Create(_ context.Context, _ event.TypedCreateEvent[*clusterv1alpha1.Cluster], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
265 | }
266 |
267 | func (h *clusterEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*clusterv1alpha1.Cluster], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
268 | oldCluster := e.ObjectOld
269 | newCluster := e.ObjectNew
270 | oldExist, newExist := false, false
271 | for _, action := range oldCluster.Status.RemedyActions {
272 | if action == string(remedyv1alpha1.TrafficControl) {
273 | oldExist = true
274 | break
275 | }
276 | }
277 | for _, action := range newCluster.Status.RemedyActions {
278 | if action == string(remedyv1alpha1.TrafficControl) {
279 | newExist = true
280 | break
281 | }
282 | }
283 |
284 | if oldExist == newExist {
285 | return
286 | }
287 |
288 | mciList := &networkingv1alpha1.MultiClusterIngressList{}
289 | if err := h.client.List(context.Background(), mciList); err != nil {
290 | klog.Errorf("failed to fetch multiclusteringresses")
291 | return
292 | }
293 |
294 | for index := range mciList.Items {
295 | mci := &mciList.Items[index]
296 | if !mciSvcLocationsContainsCluster(mci, newCluster) {
297 | continue
298 | }
299 | h.mciEventChan <- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress]{
300 | Object: mci,
301 | }
302 | }
303 | }
304 |
305 | func (h *clusterEventHandler) Delete(_ context.Context, _ event.TypedDeleteEvent[*clusterv1alpha1.Cluster], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
306 | }
307 |
308 | func (h *clusterEventHandler) Generic(_ context.Context, _ event.TypedGenericEvent[*clusterv1alpha1.Cluster], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
309 | }
310 |
311 | func mciSvcLocationsContainsCluster(mci *networkingv1alpha1.MultiClusterIngress, cluster *clusterv1alpha1.Cluster) bool {
312 | for _, location := range mci.Status.ServiceLocations {
313 | for _, clusterName := range location.Clusters {
314 | if clusterName == cluster.Name {
315 | return true
316 | }
317 | }
318 | }
319 | return false
320 | }
321 |
--------------------------------------------------------------------------------
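Note on the event handlers above: Service, Secret, and Cluster events are mapped back to the MultiClusterIngress objects that reference them by listing with client.MatchingFields on the IndexKeyServiceRefName / IndexKeySecretRefName field indexes, and the matching objects are pushed through mciEventChan so the controller's source.Channel watch re-queues them. For those lookups to work, the indexes must be registered with the manager's FieldIndexer; the real registration lives in pkg/controllers/indexes/reference_indexer.go. The snippet below is only a minimal sketch of that idea — the key parameter and the spec-walking logic are illustrative assumptions, not the repository's code:

    package example

    import (
    	"context"

    	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
    	ctrl "sigs.k8s.io/controller-runtime"
    	"sigs.k8s.io/controller-runtime/pkg/client"
    )

    // registerServiceRefIndex indexes MultiClusterIngress objects by the names of
    // the Services their rules reference, so event handlers can list impacted MCIs
    // with client.MatchingFields{key: serviceName}.
    func registerServiceRefIndex(ctx context.Context, mgr ctrl.Manager, key string) error {
    	return mgr.GetFieldIndexer().IndexField(ctx, &networkingv1alpha1.MultiClusterIngress{}, key,
    		func(obj client.Object) []string {
    			mci := obj.(*networkingv1alpha1.MultiClusterIngress)
    			var names []string
    			for _, rule := range mci.Spec.Rules {
    				if rule.HTTP == nil {
    					continue
    				}
    				for _, path := range rule.HTTP.Paths {
    					if path.Backend.Service != nil {
    						names = append(names, path.Backend.Service.Name)
    					}
    				}
    			}
    			return names
    		})
    }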
/pkg/controllers/multiclusteringress/mci_controller.go:
--------------------------------------------------------------------------------
1 | package multiclusteringress
2 |
3 | import (
4 | "context"
5 | "reflect"
6 | "sort"
7 |
8 | clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
9 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
10 | remedyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/remedy/v1alpha1"
11 | "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
12 | "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
13 | corev1 "k8s.io/api/core/v1"
14 | discoveryv1 "k8s.io/api/discovery/v1"
15 | networkingv1 "k8s.io/api/networking/v1"
16 | apierrors "k8s.io/apimachinery/pkg/api/errors"
17 | "k8s.io/apimachinery/pkg/types"
18 | "k8s.io/apimachinery/pkg/util/sets"
19 | "k8s.io/client-go/tools/record"
20 | "k8s.io/client-go/util/retry"
21 | "k8s.io/klog/v2"
22 | controllerruntime "sigs.k8s.io/controller-runtime"
23 | "sigs.k8s.io/controller-runtime/pkg/client"
24 | "sigs.k8s.io/controller-runtime/pkg/controller"
25 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
26 | "sigs.k8s.io/controller-runtime/pkg/event"
27 | "sigs.k8s.io/controller-runtime/pkg/source"
28 |
29 | multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
30 | )
31 |
32 | // ControllerName is the controller name that will be used when reporting events.
33 | const ControllerName = "multiclusteringress-controller"
34 |
35 | // MCIControllerFinalizer is added to MultiClusterIngress objects to ensure the provider LoadBalancer is deleted before the object itself is deleted.
36 | const MCIControllerFinalizer = "karmada.io/multi-cluster-ingress-controller"
37 |
38 | // MCIController is to sync MultiClusterIngress.
39 | type MCIController struct {
40 | client.Client
41 | InformerManager genericmanager.SingleClusterInformerManager
42 | LoadBalancer multiclusterprovider.LoadBalancer
43 | EventRecorder record.EventRecorder
44 | RateLimiterOptions ratelimiterflag.Options
45 | ProviderClassName string
46 | }
47 |
48 | // Reconcile performs a full reconciliation for the object referred to by the Request.
49 | // The Controller will requeue the Request to be processed again if an error is non-nil or
50 | // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
51 | func (c *MCIController) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
52 | klog.V(4).InfoS("Reconciling MultiClusterIngress", "namespace", req.Namespace, "name", req.Name)
53 |
54 | mci := &networkingv1alpha1.MultiClusterIngress{}
55 | if err := c.Client.Get(ctx, req.NamespacedName, mci); err != nil {
56 | if apierrors.IsNotFound(err) {
57 | // The mci no longer exists, in which case we stop processing.
58 | return controllerruntime.Result{}, nil
59 | }
60 | klog.ErrorS(err, "failed to get multiClusterIngress object", "NamespacedName", req.NamespacedName)
61 | return controllerruntime.Result{}, err
62 | }
63 |
64 | if !mci.DeletionTimestamp.IsZero() {
65 | return c.handleMCIDelete(ctx, mci.DeepCopy())
66 | }
67 | return c.handleMCICreateOrUpdate(ctx, mci.DeepCopy())
68 | }
69 |
70 | func (c *MCIController) handleMCIDelete(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (controllerruntime.Result, error) {
71 | klog.V(4).InfoS("Begin to handle multiClusterIngress delete event", "namespace", mci.Namespace, "name", mci.Name)
72 |
73 | err := c.LoadBalancer.EnsureLoadBalancerDeleted(ctx, mci)
74 | if err != nil {
75 | klog.ErrorS(err, "Failed to delete multiClusterIngress", "namespace", mci.Namespace, "name", mci.Name)
76 | return controllerruntime.Result{}, err
77 | }
78 | klog.InfoS("Success to delete multiClusterIngress", "namespace", mci.Namespace, "name", mci.Name)
79 |
80 | finalizersUpdated := controllerutil.RemoveFinalizer(mci, MCIControllerFinalizer)
81 | if finalizersUpdated {
82 | err = c.Client.Update(ctx, mci)
83 | if err != nil {
84 | klog.V(4).ErrorS(err, "failed to update mci with finalizer", "namespace", mci.Namespace, "name", mci.Name)
85 | return controllerruntime.Result{}, err
86 | }
87 | }
88 | return controllerruntime.Result{}, nil
89 | }
90 |
91 | func (c *MCIController) handleMCICreateOrUpdate(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (controllerruntime.Result, error) {
92 | klog.V(4).InfoS("Begin to handle multiClusterIngress create or update event", "namespace", mci.Namespace, "name", mci.Name)
93 |
94 | if !controllerutil.ContainsFinalizer(mci, MCIControllerFinalizer) {
95 | objPatch := client.MergeFrom(mci)
96 | modifiedObj := mci.DeepCopy()
97 | controllerutil.AddFinalizer(modifiedObj, MCIControllerFinalizer)
98 | err := c.Client.Patch(ctx, modifiedObj, objPatch)
99 | if err != nil {
100 | klog.V(4).ErrorS(err, "failed to update mci with finalizer", "namespace", mci.Namespace, "name", mci.Name)
101 | return controllerruntime.Result{}, err
102 | }
103 | }
104 |
105 | if err := c.updateMCITrafficBlockClusters(ctx, mci); err != nil {
106 | return controllerruntime.Result{}, err
107 | }
108 |
109 | _, exist, err := c.LoadBalancer.GetLoadBalancer(ctx, mci)
110 | if err != nil {
111 | klog.ErrorS(err, "failed to get loadBalancer with provider", "namespace", mci.Namespace, "name", mci.Name)
112 | return controllerruntime.Result{}, err
113 | }
114 | if exist {
115 | return c.handleMCIUpdate(ctx, mci)
116 | }
117 | return c.handleMCICreate(ctx, mci)
118 | }
119 |
120 | func (c *MCIController) updateMCITrafficBlockClusters(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) error {
121 | locatedClusters := sets.NewString()
122 | for _, location := range mci.Status.ServiceLocations {
123 | locatedClusters.Insert(location.Clusters...)
124 | }
125 |
126 | clusterList := &clusterv1alpha1.ClusterList{}
127 | if err := c.Client.List(ctx, clusterList); err != nil {
128 | klog.Errorf("Failed to list cluster: %v", err)
129 | return err
130 | }
131 |
132 | var trafficBlockClusters []string
133 | for _, cluster := range clusterList.Items {
134 | if !locatedClusters.Has(cluster.Name) {
135 | continue
136 | }
137 | for _, action := range cluster.Status.RemedyActions {
138 | if action == string(remedyv1alpha1.TrafficControl) {
139 | trafficBlockClusters = append(trafficBlockClusters, cluster.Name)
140 | break
141 | }
142 | }
143 | }
144 | sort.Strings(trafficBlockClusters)
145 |
146 | mciNamespacedName := types.NamespacedName{Namespace: mci.Namespace, Name: mci.Name}
147 | err := retry.RetryOnConflict(retry.DefaultRetry, func() error {
148 | if reflect.DeepEqual(trafficBlockClusters, mci.Status.TrafficBlockClusters) {
149 | return nil
150 | }
151 | mci.Status.TrafficBlockClusters = trafficBlockClusters
152 | updateErr := c.Client.Status().Update(ctx, mci)
153 | if updateErr == nil {
154 | return nil
155 | }
156 |
157 | updatedMCI := &networkingv1alpha1.MultiClusterIngress{}
158 | err := c.Client.Get(ctx, mciNamespacedName, updatedMCI)
159 | if err == nil {
160 | mci = updatedMCI.DeepCopy()
161 | } else {
162 | klog.Errorf("Failed to get updated multiClusterIngress(%s): %v", mciNamespacedName.String(), err)
163 | }
164 | return updateErr
165 | })
166 | if err != nil {
167 | klog.Errorf("Failed to sync multiClusterIngress(%s) trafficBlockClusters: %v", mciNamespacedName.String(), err)
168 | return err
169 | }
170 | klog.V(4).Infof("Success to sync multiClusterIngress(%s) trafficBlockClusters", mciNamespacedName.String())
171 | return nil
172 | }
173 |
174 | func (c *MCIController) handleMCICreate(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (controllerruntime.Result, error) {
175 | klog.V(4).InfoS("Begin to handle multiClusterIngress create event", "namespace", mci.Namespace, "name", mci.Name)
176 |
177 | status, err := c.LoadBalancer.EnsureLoadBalancer(ctx, mci)
178 | if err != nil {
179 | klog.ErrorS(err, "failed to create loadBalancer with provider", "namespace", mci.Namespace, "name", mci.Name)
180 | return controllerruntime.Result{}, err
181 | }
182 | klog.InfoS("Success to create loadBalancer with provider", "namespace", mci.Namespace, "name", mci.Name)
183 |
184 | err = c.updateMCIStatus(ctx, mci, status)
185 | if err != nil {
186 | klog.ErrorS(err, "failed to update mci status", "namespace", mci.Namespace, "name", mci.Name)
187 | return controllerruntime.Result{}, err
188 | }
189 | return controllerruntime.Result{}, nil
190 | }
191 |
192 | func (c *MCIController) handleMCIUpdate(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (controllerruntime.Result, error) {
193 | klog.V(4).InfoS("Begin to handle multiClusterIngress update event", "namespace", mci.Namespace, "name", mci.Name)
194 |
195 | status, err := c.LoadBalancer.UpdateLoadBalancer(ctx, mci)
196 | if err != nil {
197 | klog.ErrorS(err, "failed to update loadBalancer with provider", "namespace", mci.Namespace, "name", mci.Name)
198 | return controllerruntime.Result{}, err
199 | }
200 | klog.InfoS("Success to update loadBalancer with provider", "namespace", mci.Namespace, "name", mci.Name)
201 |
202 | err = c.updateMCIStatus(ctx, mci, status)
203 | if err != nil {
204 | klog.ErrorS(err, "failed to update mci status", "namespace", mci.Namespace, "name", mci.Name)
205 | return controllerruntime.Result{}, err
206 | }
207 | return controllerruntime.Result{}, nil
208 | }
209 |
210 | func (c *MCIController) updateMCIStatus(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress, newStatus *networkingv1.IngressLoadBalancerStatus) error {
211 | if reflect.DeepEqual(mci.Status.LoadBalancer, *newStatus) {
212 | return nil
213 | }
214 |
215 | return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
216 | mci.Status.LoadBalancer = *newStatus
217 | updateErr := c.Status().Update(ctx, mci)
218 | if updateErr == nil {
219 | return nil
220 | }
221 |
222 | updated := &networkingv1alpha1.MultiClusterIngress{}
223 | if err = c.Get(ctx, client.ObjectKey{Namespace: mci.Namespace, Name: mci.Name}, updated); err == nil {
224 | mci = updated
225 | } else {
226 | klog.Errorf("Failed to get updated multiClusterIngress(%s/%s): %v", mci.Namespace, mci.Name, err)
227 | }
228 | return updateErr
229 | })
230 | }
231 |
232 | // SetupWithManager creates a controller and registers it with the controller manager.
233 | func (c *MCIController) SetupWithManager(ctx context.Context, mgr controllerruntime.Manager) error {
234 | mciController, err := controller.New(ControllerName, mgr,
235 | controller.Options{
236 | Reconciler: c,
237 | RateLimiter: ratelimiterflag.DefaultControllerRateLimiter[controllerruntime.Request](c.RateLimiterOptions),
238 | })
239 | if err != nil {
240 | return err
241 | }
242 |
243 | if err = c.setupWatches(ctx, mciController, mgr); err != nil {
244 | return err
245 | }
246 |
247 | return nil
248 | }
249 |
250 | func (c *MCIController) setupWatches(ctx context.Context, mciController controller.Controller, mgr controllerruntime.Manager) error {
251 | mciEventChan := make(chan event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress])
252 | svcEventChan := make(chan event.TypedGenericEvent[*corev1.Service])
253 |
254 | mciEventHandler := newMultiClusterIngressEventHandler(ctx, c.Client, c.ProviderClassName)
255 | svcEventHandler := newServiceEventHandler(mciEventChan, c.Client)
256 | epsEventHandler := newEndpointSlicesEventHandler(svcEventChan)
257 | secEventHandler := newSecretEventHandler(mciEventChan, c.Client)
258 | clusterHandler := newClusterEventHandler(mciEventChan, c.Client)
259 |
260 | if err := mciController.Watch(source.Kind[*networkingv1alpha1.MultiClusterIngress](mgr.GetCache(), &networkingv1alpha1.MultiClusterIngress{}, mciEventHandler)); err != nil {
261 | return err
262 | }
263 | if err := mciController.Watch(source.Channel[*networkingv1alpha1.MultiClusterIngress](mciEventChan, mciEventHandler)); err != nil {
264 | return err
265 | }
266 | if err := mciController.Watch(source.Kind[*corev1.Service](mgr.GetCache(), &corev1.Service{}, svcEventHandler)); err != nil {
267 | return err
268 | }
269 | if err := mciController.Watch(source.Channel[*corev1.Service](svcEventChan, svcEventHandler)); err != nil {
270 | return err
271 | }
272 | if err := mciController.Watch(source.Kind[*discoveryv1.EndpointSlice](mgr.GetCache(), &discoveryv1.EndpointSlice{}, epsEventHandler)); err != nil {
273 | return err
274 | }
275 | if err := mciController.Watch(source.Kind[*corev1.Secret](mgr.GetCache(), &corev1.Secret{}, secEventHandler)); err != nil {
276 | return err
277 | }
278 | if err := mciController.Watch(source.Kind[*clusterv1alpha1.Cluster](mgr.GetCache(), &clusterv1alpha1.Cluster{}, clusterHandler)); err != nil {
279 | return err
280 | }
281 | return nil
282 | }
283 |
--------------------------------------------------------------------------------
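The controller above drives a cloud-specific multiclusterprovider.LoadBalancer: GetLoadBalancer decides between the create and update paths, EnsureLoadBalancer/UpdateLoadBalancer return an IngressLoadBalancerStatus that is copied into the MCI status, and EnsureLoadBalancerDeleted runs before the finalizer is removed. The repository's fake/ package provides a reference provider; purely as an illustration of the call shape inferred from the call sites above (signatures may not exactly match the authoritative interface in cloud.go), an in-memory provider could look like:

    package example

    import (
    	"context"

    	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
    	networkingv1 "k8s.io/api/networking/v1"
    )

    // memoryLoadBalancer keeps a per-MCI record instead of talking to a cloud API.
    type memoryLoadBalancer struct {
    	created map[string]bool // key: "<namespace>/<name>"
    }

    func mciKey(mci *networkingv1alpha1.MultiClusterIngress) string {
    	return mci.Namespace + "/" + mci.Name
    }

    func (m *memoryLoadBalancer) GetLoadBalancer(_ context.Context, mci *networkingv1alpha1.MultiClusterIngress) (*networkingv1.IngressLoadBalancerStatus, bool, error) {
    	if m.created[mciKey(mci)] {
    		return &networkingv1.IngressLoadBalancerStatus{}, true, nil
    	}
    	return nil, false, nil
    }

    func (m *memoryLoadBalancer) EnsureLoadBalancer(_ context.Context, mci *networkingv1alpha1.MultiClusterIngress) (*networkingv1.IngressLoadBalancerStatus, error) {
    	m.created[mciKey(mci)] = true
    	// 192.0.2.1 is a documentation address standing in for a provisioned VIP.
    	return &networkingv1.IngressLoadBalancerStatus{
    		Ingress: []networkingv1.IngressLoadBalancerIngress{{IP: "192.0.2.1"}},
    	}, nil
    }

    func (m *memoryLoadBalancer) UpdateLoadBalancer(ctx context.Context, mci *networkingv1alpha1.MultiClusterIngress) (*networkingv1.IngressLoadBalancerStatus, error) {
    	return m.EnsureLoadBalancer(ctx, mci)
    }

    func (m *memoryLoadBalancer) EnsureLoadBalancerDeleted(_ context.Context, mci *networkingv1alpha1.MultiClusterIngress) error {
    	delete(m.created, mciKey(mci))
    	return nil
    }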
/pkg/controllers/multiclusterservice/eventhandlers.go:
--------------------------------------------------------------------------------
1 | package multiclusterservice
2 |
3 | import (
4 | "context"
5 | "strings"
6 |
7 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
8 | corev1 "k8s.io/api/core/v1"
9 | discoveryv1 "k8s.io/api/discovery/v1"
10 | "k8s.io/apimachinery/pkg/api/equality"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "k8s.io/apimachinery/pkg/types"
13 | "k8s.io/client-go/util/workqueue"
14 | "k8s.io/klog/v2"
15 | "sigs.k8s.io/controller-runtime/pkg/client"
16 | "sigs.k8s.io/controller-runtime/pkg/event"
17 | "sigs.k8s.io/controller-runtime/pkg/handler"
18 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
19 |
20 | "github.com/karmada-io/multicluster-cloud-provider/pkg/util"
21 | )
22 |
23 | func newMultiClusterServiceEventHandler() handler.TypedEventHandler[*networkingv1alpha1.MultiClusterService, reconcile.Request] {
24 | return &multiClusterServiceEventHandler{}
25 | }
26 |
27 | var _ handler.TypedEventHandler[*networkingv1alpha1.MultiClusterService, reconcile.Request] = (*multiClusterServiceEventHandler)(nil)
28 |
29 | type multiClusterServiceEventHandler struct {
30 | }
31 |
32 | func (h *multiClusterServiceEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*networkingv1alpha1.MultiClusterService], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
33 | if !util.MCSContainLoadBalanceType(e.Object) {
34 | return
35 | }
36 |
37 | queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
38 | Namespace: e.Object.Namespace,
39 | Name: e.Object.Name,
40 | }})
41 | }
42 |
43 | func (h *multiClusterServiceEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*networkingv1alpha1.MultiClusterService], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
44 | mcsOld := e.ObjectOld
45 | mcsNew := e.ObjectNew
46 | if !util.MCSContainLoadBalanceType(mcsOld) && !util.MCSContainLoadBalanceType(mcsNew) {
47 | return
48 | }
49 |
50 | // We only care about the update events below:
51 | if equality.Semantic.DeepEqual(mcsOld.Annotations, mcsNew.Annotations) &&
52 | equality.Semantic.DeepEqual(mcsOld.Spec, mcsNew.Spec) &&
53 | equality.Semantic.DeepEqual(mcsOld.DeletionTimestamp.IsZero(), mcsNew.DeletionTimestamp.IsZero()) {
54 | return
55 | }
56 |
57 | queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
58 | Namespace: mcsNew.GetNamespace(),
59 | Name: mcsNew.GetName(),
60 | }})
61 | }
62 |
63 | func (h *multiClusterServiceEventHandler) Delete(_ context.Context, _ event.TypedDeleteEvent[*networkingv1alpha1.MultiClusterService], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
64 | // Since finalizer is added to the MultiClusterService object,
65 | // the delete event is processed by the update event.
66 | }
67 |
68 | func (h *multiClusterServiceEventHandler) Generic(_ context.Context, e event.TypedGenericEvent[*networkingv1alpha1.MultiClusterService], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
69 | queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{
70 | Namespace: e.Object.GetNamespace(),
71 | Name: e.Object.GetName(),
72 | }})
73 | }
74 |
75 | func newServiceEventHandler(mcsEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterService], client client.Client) handler.TypedEventHandler[*corev1.Service, reconcile.Request] {
76 | return &serviceEventHandler{
77 | mcsEventChan: mcsEventChan,
78 | client: client,
79 | }
80 | }
81 |
82 | var _ handler.TypedEventHandler[*corev1.Service, reconcile.Request] = (*serviceEventHandler)(nil)
83 |
84 | type serviceEventHandler struct {
85 | mcsEventChan chan<- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterService]
86 | client client.Client
87 | }
88 |
89 | func (h *serviceEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
90 | h.enqueueImpactedMCS(e.Object.GetNamespace(), e.Object.GetName())
91 | }
92 |
93 | func (h *serviceEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
94 | svcOld := e.ObjectOld
95 | svcNew := e.ObjectNew
96 |
97 | // We only care about the update events below:
98 | if equality.Semantic.DeepEqual(svcOld.Annotations, svcNew.Annotations) &&
99 | equality.Semantic.DeepEqual(svcOld.Spec, svcNew.Spec) {
100 | return
101 | }
102 |
103 | h.enqueueImpactedMCS(svcNew.Namespace, svcNew.Name)
104 | }
105 |
106 | func (h *serviceEventHandler) Delete(_ context.Context, e event.TypedDeleteEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
107 | h.enqueueImpactedMCS(e.Object.GetNamespace(), e.Object.GetName())
108 | }
109 |
110 | func (h *serviceEventHandler) Generic(_ context.Context, e event.TypedGenericEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
111 | h.enqueueImpactedMCS(e.Object.GetNamespace(), e.Object.GetName())
112 | }
113 |
114 | func (h *serviceEventHandler) enqueueImpactedMCS(svcNamespace, svcName string) {
115 | h.mcsEventChan <- event.TypedGenericEvent[*networkingv1alpha1.MultiClusterService]{
116 | Object: &networkingv1alpha1.MultiClusterService{
117 | ObjectMeta: metav1.ObjectMeta{
118 | Namespace: svcNamespace,
119 | Name: svcName,
120 | }}}
121 | }
122 |
123 | // derivedServicePrefix is the prefix of the name of a Service derived from a ServiceImport.
124 | const derivedServicePrefix = "derived-"
125 |
126 | func newEndpointSlicesEventHandler(svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]) handler.TypedEventHandler[*discoveryv1.EndpointSlice, reconcile.Request] {
127 | return &endpointSlicesEventHandler{
128 | svcEventChan: svcEventChan,
129 | }
130 | }
131 |
132 | var _ handler.TypedEventHandler[*discoveryv1.EndpointSlice, reconcile.Request] = (*endpointSlicesEventHandler)(nil)
133 |
134 | type endpointSlicesEventHandler struct {
135 | svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]
136 | }
137 |
138 | func (h *endpointSlicesEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
139 | h.enqueueImpactedSvc(e.Object)
140 | }
141 |
142 | func (h *endpointSlicesEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
143 | h.enqueueImpactedSvc(e.ObjectNew)
144 | }
145 |
146 | func (h *endpointSlicesEventHandler) Delete(_ context.Context, e event.TypedDeleteEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
147 | h.enqueueImpactedSvc(e.Object)
148 | }
149 |
150 | func (h *endpointSlicesEventHandler) Generic(_ context.Context, _ event.TypedGenericEvent[*discoveryv1.EndpointSlice], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
151 | }
152 |
153 | func (h *endpointSlicesEventHandler) enqueueImpactedSvc(obj client.Object) {
154 | svcName, ok := obj.GetLabels()[discoveryv1.LabelServiceName]
155 | if !ok {
156 | klog.Warningf("Can not get the key(%s) with the endpointSlices object(%s/%s)",
157 | discoveryv1.LabelServiceName, obj.GetNamespace(), obj.GetName())
158 | return
159 | }
160 |
161 | h.svcEventChan <- event.TypedGenericEvent[*corev1.Service]{
162 | Object: &corev1.Service{
163 | ObjectMeta: metav1.ObjectMeta{
164 | Namespace: obj.GetNamespace(),
165 | Name: strings.TrimPrefix(svcName, derivedServicePrefix),
166 | }}}
167 | }
168 |
--------------------------------------------------------------------------------
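A note on the derived- prefix used above: EndpointSlices that Karmada collects into the control plane belong to Services named derived-<original-name>, which are created from ServiceImports, so the handler strips that prefix to find the MultiClusterService with the original name. A minimal sketch of the same mapping, assuming that naming convention:

    package example

    import (
    	"strings"

    	discoveryv1 "k8s.io/api/discovery/v1"
    )

    // serviceNameForEndpointSlice returns the name of the Service (and hence the
    // MultiClusterService of the same name) an EndpointSlice belongs to, trimming
    // the "derived-" prefix carried by ServiceImport-derived Services.
    func serviceNameForEndpointSlice(eps *discoveryv1.EndpointSlice) (string, bool) {
    	name, ok := eps.Labels[discoveryv1.LabelServiceName]
    	if !ok {
    		return "", false
    	}
    	return strings.TrimPrefix(name, "derived-"), true
    }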
/pkg/controllers/multiclusterservice/mcs_controller.go:
--------------------------------------------------------------------------------
1 | package multiclusterservice
2 |
3 | import (
4 | "context"
5 | "reflect"
6 |
7 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
8 | "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
9 | "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
10 | corev1 "k8s.io/api/core/v1"
11 | discoveryv1 "k8s.io/api/discovery/v1"
12 | apierrors "k8s.io/apimachinery/pkg/api/errors"
13 | "k8s.io/client-go/tools/record"
14 | "k8s.io/client-go/util/retry"
15 | "k8s.io/klog/v2"
16 | controllerruntime "sigs.k8s.io/controller-runtime"
17 | "sigs.k8s.io/controller-runtime/pkg/client"
18 | "sigs.k8s.io/controller-runtime/pkg/controller"
19 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
20 | "sigs.k8s.io/controller-runtime/pkg/event"
21 | "sigs.k8s.io/controller-runtime/pkg/source"
22 |
23 | multiclusterprovider "github.com/karmada-io/multicluster-cloud-provider"
24 | "github.com/karmada-io/multicluster-cloud-provider/pkg/util"
25 | )
26 |
27 | // ControllerName is the controller name that will be used when reporting events.
28 | const ControllerName = "multiclusterservice-controller"
29 |
30 | // MCSControllerFinalizer is added to MultiClusterService objects to ensure the MCS LoadBalancer is deleted before the object itself is deleted.
31 | const MCSControllerFinalizer = "karmada.io/multi-cluster-service-controller"
32 |
33 | // LBEventReason indicates the reason for LoadBalancer events.
34 | const LBEventReason string = "MCSLoadBalance"
35 |
36 | // MCSController is to sync MultiClusterService.
37 | type MCSController struct {
38 | client.Client
39 | InformerManager genericmanager.SingleClusterInformerManager
40 | MCSLoadBalancer multiclusterprovider.MCSLoadBalancer
41 | EventRecorder record.EventRecorder
42 | RateLimiterOptions ratelimiterflag.Options
43 | }
44 |
45 | // Reconcile performs a full reconciliation for the object referred to by the Request.
46 | // The Controller will requeue the Request to be processed again if an error is non-nil or
47 | // Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
48 | func (c *MCSController) Reconcile(
49 | ctx context.Context,
50 | req controllerruntime.Request,
51 | ) (controllerruntime.Result, error) {
52 | klog.V(4).InfoS("Reconciling MultiClusterService",
53 | "namespace", req.Namespace, "name", req.Name)
54 |
55 | mcs := &networkingv1alpha1.MultiClusterService{}
56 | if err := c.Client.Get(ctx, req.NamespacedName, mcs); err != nil {
57 | if apierrors.IsNotFound(err) {
58 | // The mcs no longer exists, in which case we stop processing.
59 | return controllerruntime.Result{}, nil
60 | }
61 | klog.ErrorS(err, "failed to get MultiClusterService object",
62 | "NamespacedName", req.NamespacedName)
63 | return controllerruntime.Result{}, err
64 | }
65 |
66 | if !mcs.DeletionTimestamp.IsZero() || !util.MCSContainLoadBalanceType(mcs) {
67 | return c.handleMCSDelete(ctx, mcs.DeepCopy())
68 | }
69 | return c.handleMCSCreateOrUpdate(ctx, mcs.DeepCopy())
70 | }
71 |
72 | func (c *MCSController) handleMCSDelete(
73 | ctx context.Context,
74 | mcs *networkingv1alpha1.MultiClusterService,
75 | ) (controllerruntime.Result, error) {
76 | klog.V(4).InfoS("Begin to handle MultiClusterService delete event",
77 | "namespace", mcs.Namespace, "name", mcs.Name)
78 |
79 | if util.MCSContainLoadBalanceType(mcs) {
80 | err := c.MCSLoadBalancer.EnsureMCSLoadBalancerDeleted(ctx, mcs)
81 | if err != nil {
82 | klog.ErrorS(err, "Failed to delete MultiClusterService",
83 | "namespace", mcs.Namespace, "name", mcs.Name)
84 | c.EventRecorder.Eventf(mcs, corev1.EventTypeWarning, LBEventReason,
85 | "Failed to delete MultiClusterService",
86 | "namespace", mcs.Namespace, "name", mcs.Name, "error", err.Error())
87 | return controllerruntime.Result{}, err
88 | }
89 | } else {
90 | _, exist, err := c.MCSLoadBalancer.GetMCSLoadBalancer(ctx, mcs)
91 | if err != nil {
92 | klog.ErrorS(err, "Failed to get MultiClusterService",
93 | "namespace", mcs.Namespace, "name", mcs.Name)
94 | c.EventRecorder.Eventf(mcs, corev1.EventTypeWarning, LBEventReason,
95 | "Failed to get MultiClusterService",
96 | "namespace", mcs.Namespace, "name", mcs.Name, "error", err.Error())
97 | return controllerruntime.Result{}, err
98 | }
99 |
100 | if exist {
101 | err := c.MCSLoadBalancer.EnsureMCSLoadBalancerDeleted(ctx, mcs)
102 | if err != nil {
103 | klog.ErrorS(err, "Failed to delete MultiClusterService",
104 | "namespace", mcs.Namespace, "name", mcs.Name)
105 | c.EventRecorder.Eventf(mcs, corev1.EventTypeWarning, LBEventReason,
106 | "Failed to delete MultiClusterService",
107 | "namespace", mcs.Namespace, "name", mcs.Name, "error", err.Error())
108 | return controllerruntime.Result{}, err
109 | }
110 | } else {
111 | klog.V(4).InfoS("MultiClusterService doesn't contain LoadBalancer type, "+
112 | "ignore it", "namespace", mcs.Namespace, "name", mcs.Name)
113 | return controllerruntime.Result{}, nil
114 | }
115 | }
116 |
117 | klog.InfoS("Success to delete MultiClusterService",
118 | "namespace", mcs.Namespace, "name", mcs.Name)
119 |
120 | finalizersUpdated := controllerutil.RemoveFinalizer(mcs, MCSControllerFinalizer)
121 | if finalizersUpdated {
122 | err := c.Client.Update(ctx, mcs)
123 | if err != nil {
124 | klog.V(4).ErrorS(err, "failed to update MultiClusterService with finalizer",
125 | "namespace", mcs.Namespace, "name", mcs.Name)
126 | return controllerruntime.Result{}, err
127 | }
128 | }
129 | return controllerruntime.Result{}, nil
130 | }
131 |
132 | func (c *MCSController) handleMCSCreateOrUpdate(
133 | ctx context.Context,
134 | mcs *networkingv1alpha1.MultiClusterService,
135 | ) (controllerruntime.Result, error) {
136 | klog.V(4).InfoS("Begin to handle MultiClusterService create or update event",
137 | "namespace", mcs.Namespace, "name", mcs.Name)
138 |
139 | finalizersUpdated := controllerutil.AddFinalizer(mcs, MCSControllerFinalizer)
140 | if finalizersUpdated {
141 | err := c.Client.Update(ctx, mcs)
142 | if err != nil {
143 | klog.V(4).ErrorS(err, "failed to update mcs with finalizer",
144 | "namespace", mcs.Namespace, "name", mcs.Name)
145 | return controllerruntime.Result{}, err
146 | }
147 | }
148 |
149 | _, exist, err := c.MCSLoadBalancer.GetMCSLoadBalancer(ctx, mcs)
150 | if err != nil {
151 | klog.ErrorS(err, "failed to get loadBalancer with provider",
152 | "namespace", mcs.Namespace, "name", mcs.Name)
153 | return controllerruntime.Result{}, err
154 | }
155 | if exist {
156 | return c.handleMCSUpdate(ctx, mcs)
157 | }
158 | return c.handleMCSCreate(ctx, mcs)
159 | }
160 |
161 | func (c *MCSController) handleMCSCreate(
162 | ctx context.Context,
163 | mcs *networkingv1alpha1.MultiClusterService,
164 | ) (controllerruntime.Result, error) {
165 | klog.V(4).InfoS("Begin to handle MultiClusterService create event",
166 | "namespace", mcs.Namespace, "name", mcs.Name)
167 |
168 | status, err := c.MCSLoadBalancer.EnsureMCSLoadBalancer(ctx, mcs)
169 | if err != nil {
170 | klog.ErrorS(err, "failed to create loadBalancer with provider",
171 | "namespace", mcs.Namespace, "name", mcs.Name)
172 | return controllerruntime.Result{}, err
173 | }
174 | klog.InfoS("Success to create loadBalancer with provider",
175 | "namespace", mcs.Namespace, "name", mcs.Name)
176 |
177 | err = c.updateMCSStatus(ctx, mcs, status)
178 | if err != nil {
179 | klog.ErrorS(err, "failed to update MultiClusterService status",
180 | "namespace", mcs.Namespace, "name", mcs.Name)
181 | return controllerruntime.Result{}, err
182 | }
183 | return controllerruntime.Result{}, nil
184 | }
185 |
186 | func (c *MCSController) handleMCSUpdate(
187 | ctx context.Context,
188 | mcs *networkingv1alpha1.MultiClusterService,
189 | ) (controllerruntime.Result, error) {
190 | klog.V(4).InfoS("Begin to handle MultiClusterService update event",
191 | "namespace", mcs.Namespace, "name", mcs.Name)
192 |
193 | status, err := c.MCSLoadBalancer.UpdateMCSLoadBalancer(ctx, mcs)
194 | if err != nil {
195 | klog.ErrorS(err, "failed to update loadBalancer with provider",
196 | "namespace", mcs.Namespace, "name", mcs.Name)
197 | return controllerruntime.Result{}, err
198 | }
199 | klog.InfoS("Success to update loadBalancer with provider",
200 | "namespace", mcs.Namespace, "name", mcs.Name)
201 |
202 | err = c.updateMCSStatus(ctx, mcs, status)
203 | if err != nil {
204 | klog.ErrorS(err, "failed to update MultiClusterService status",
205 | "namespace", mcs.Namespace, "name", mcs.Name)
206 | return controllerruntime.Result{}, err
207 | }
208 | return controllerruntime.Result{}, nil
209 | }
210 |
211 | func (c *MCSController) updateMCSStatus(
212 | ctx context.Context,
213 | mcs *networkingv1alpha1.MultiClusterService,
214 | newStatus *corev1.LoadBalancerStatus,
215 | ) error {
216 | if newStatus == nil {
217 | return nil
218 | }
219 |
220 | if reflect.DeepEqual(mcs.Status.LoadBalancer, *newStatus) {
221 | return nil
222 | }
223 |
224 | return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) {
225 | mcs.Status.LoadBalancer = *newStatus
226 | updateErr := c.Status().Update(ctx, mcs)
227 | if updateErr == nil {
228 | return nil
229 | }
230 |
231 | updated := &networkingv1alpha1.MultiClusterService{}
232 | if err = c.Get(ctx, client.ObjectKey{Namespace: mcs.Namespace, Name: mcs.Name}, updated); err == nil {
233 | mcs = updated
234 | } else {
235 | klog.Errorf("Failed to get updated MultiClusterService(%s/%s): %v", mcs.Namespace, mcs.Name, err)
236 | }
237 | return updateErr
238 | })
239 | }
240 |
241 | // SetupWithManager creates a controller and registers it with the controller manager.
242 | func (c *MCSController) SetupWithManager(_ context.Context, mgr controllerruntime.Manager) error {
243 | mcsController, err := controller.New(ControllerName, mgr,
244 | controller.Options{
245 | Reconciler: c,
246 | RateLimiter: ratelimiterflag.DefaultControllerRateLimiter[controllerruntime.Request](c.RateLimiterOptions),
247 | })
248 | if err != nil {
249 | return err
250 | }
251 |
252 | if err = c.setupWatches(mcsController, mgr); err != nil {
253 | return err
254 | }
255 |
256 | return nil
257 | }
258 |
259 | func (c *MCSController) setupWatches(mcsController controller.Controller, mgr controllerruntime.Manager) error {
260 | mcsEventChan := make(chan event.TypedGenericEvent[*networkingv1alpha1.MultiClusterService])
261 | svcEventChan := make(chan event.TypedGenericEvent[*corev1.Service])
262 |
263 | mcsEventHandler := newMultiClusterServiceEventHandler()
264 | svcEventHandler := newServiceEventHandler(mcsEventChan, c.Client)
265 | epsEventHandler := newEndpointSlicesEventHandler(svcEventChan)
266 |
267 | if err := mcsController.Watch(source.Kind[*networkingv1alpha1.MultiClusterService](mgr.GetCache(), &networkingv1alpha1.MultiClusterService{}, mcsEventHandler)); err != nil {
268 | return err
269 | }
270 | if err := mcsController.Watch(source.Channel[*networkingv1alpha1.MultiClusterService](mcsEventChan, mcsEventHandler)); err != nil {
271 | return err
272 | }
273 | if err := mcsController.Watch(source.Kind[*corev1.Service](mgr.GetCache(), &corev1.Service{}, svcEventHandler)); err != nil {
274 | return err
275 | }
276 | if err := mcsController.Watch(source.Channel[*corev1.Service](svcEventChan, svcEventHandler)); err != nil {
277 | return err
278 | }
279 | if err := mcsController.Watch(source.Kind[*discoveryv1.EndpointSlice](mgr.GetCache(), &discoveryv1.EndpointSlice{}, epsEventHandler)); err != nil {
280 | return err
281 | }
282 | return nil
283 | }
284 |
--------------------------------------------------------------------------------
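MCSController only provisions load balancers for MultiClusterService objects whose spec requests the LoadBalancer exposure type; when that type is removed, or the object is being deleted, the delete path tears the load balancer down. The check is implemented by util.MCSContainLoadBalanceType in pkg/util/multiclusterservice.go; a minimal sketch of what such a helper might look like, assuming the ExposureTypeLoadBalancer constant from the Karmada networking API:

    package example

    import (
    	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
    )

    // containsLoadBalanceType reports whether the MCS requests a cloud load balancer.
    func containsLoadBalanceType(mcs *networkingv1alpha1.MultiClusterService) bool {
    	for _, t := range mcs.Spec.Types {
    		if t == networkingv1alpha1.ExposureTypeLoadBalancer {
    			return true
    		}
    	}
    	return false
    }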
/pkg/controllers/serviceexportpropagation/eventhandlers.go:
--------------------------------------------------------------------------------
1 | package serviceexportpropagation
2 |
3 | import (
4 | "context"
5 |
6 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
7 | workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
8 | corev1 "k8s.io/api/core/v1"
9 | "k8s.io/apimachinery/pkg/api/equality"
10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | "k8s.io/apimachinery/pkg/types"
12 | "k8s.io/client-go/util/workqueue"
13 | "k8s.io/klog/v2"
14 | "k8s.io/utils/strings/slices"
15 | "sigs.k8s.io/controller-runtime/pkg/client"
16 | "sigs.k8s.io/controller-runtime/pkg/event"
17 | "sigs.k8s.io/controller-runtime/pkg/handler"
18 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
19 |
20 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/indexes"
21 | "github.com/karmada-io/multicluster-cloud-provider/pkg/util"
22 | )
23 |
24 | func newServiceEventHandler(ctx context.Context, client client.Client) handler.TypedEventHandler[*corev1.Service, reconcile.Request] {
25 | return &serviceEventHandler{
26 | ctx: ctx,
27 | client: client,
28 | }
29 | }
30 |
31 | var _ handler.TypedEventHandler[*corev1.Service, reconcile.Request] = (*serviceEventHandler)(nil)
32 |
33 | type serviceEventHandler struct {
34 | ctx context.Context
35 | client client.Client
36 | }
37 |
38 | func (h *serviceEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*corev1.Service], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
39 | mciList := &networkingv1alpha1.MultiClusterIngressList{}
40 | if err := h.client.List(h.ctx, mciList,
41 | client.InNamespace(e.Object.GetNamespace()),
42 | client.MatchingFields{indexes.IndexKeyServiceRefName: e.Object.GetName()}); err != nil {
43 | klog.Errorf("failed to fetch multiclusteringresses")
44 | return
45 | }
46 |
47 | if len(mciList.Items) > 0 {
48 | queue.Add(reconcile.Request{
49 | NamespacedName: types.NamespacedName{
50 | Namespace: e.Object.GetNamespace(),
51 | Name: e.Object.GetName(),
52 | }})
53 | return
54 | }
55 |
56 | mcs := &networkingv1alpha1.MultiClusterService{}
57 | if err := h.client.Get(h.ctx, types.NamespacedName{
58 | Namespace: e.Object.GetNamespace(),
59 | Name: e.Object.GetName(),
60 | }, mcs); err != nil {
61 | return
62 | }
63 |
64 | queue.Add(reconcile.Request{
65 | NamespacedName: types.NamespacedName{
66 | Namespace: e.Object.GetNamespace(),
67 | Name: e.Object.GetName(),
68 | }})
69 | }
70 |
71 | func (h *serviceEventHandler) Update(_ context.Context, _ event.TypedUpdateEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
72 | // We only need to create ServiceExport based on the service and propagate it to
73 | // member clusters. Therefore, we do not need to pay attention to service update.
74 | }
75 |
76 | func (h *serviceEventHandler) Delete(_ context.Context, _ event.TypedDeleteEvent[*corev1.Service], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
77 | // An ownerReference pointing to the Service is set on the ServiceExport
78 | // object, so cleanup is handled by the garbage collector.
79 | }
80 |
81 | func (h *serviceEventHandler) Generic(_ context.Context, e event.TypedGenericEvent[*corev1.Service], queue workqueue.TypedRateLimitingInterface[reconcile.Request]) {
82 | queue.Add(reconcile.Request{
83 | NamespacedName: types.NamespacedName{
84 | Namespace: e.Object.GetNamespace(),
85 | Name: e.Object.GetName(),
86 | }})
87 | }
88 |
89 | func newMultiClusterIngressEventHandler(ctx context.Context, client client.Client, svcEventChan chan<- event.TypedGenericEvent[*corev1.Service], providerClassName string) handler.TypedEventHandler[*networkingv1alpha1.MultiClusterIngress, reconcile.Request] {
90 | return &multiClusterIngressEventHandler{
91 | ctx: ctx,
92 | client: client,
93 | svcEventChan: svcEventChan,
94 | ingClassName: providerClassName,
95 | }
96 | }
97 |
98 | var _ handler.TypedEventHandler[*networkingv1alpha1.MultiClusterIngress, reconcile.Request] = (*multiClusterIngressEventHandler)(nil)
99 |
100 | type multiClusterIngressEventHandler struct {
101 | ctx context.Context
102 | client client.Client
103 | svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]
104 | ingClassName string
105 | }
106 |
107 | func (h *multiClusterIngressEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*networkingv1alpha1.MultiClusterIngress], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
108 | if !util.CheckIngressClassMatched(h.ctx, h.client, e.Object, h.ingClassName) {
109 | return
110 | }
111 | h.enqueueImpactedService(e.Object)
112 | }
113 |
114 | func (h *multiClusterIngressEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*networkingv1alpha1.MultiClusterIngress], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
115 | mciOld := e.ObjectOld
116 | mciNew := e.ObjectNew
117 | if !util.CheckIngressClassMatched(h.ctx, h.client, mciNew, h.ingClassName) {
118 | return
119 | }
120 |
121 | // We only care about the service update:
122 | oldSvcRefs := indexes.BuildServiceRefIndexes(mciOld)
123 | newSvcRefs := indexes.BuildServiceRefIndexes(mciNew)
124 | if equality.Semantic.DeepEqual(oldSvcRefs, newSvcRefs) {
125 | return
126 | }
127 |
128 | var targetRefs []string
129 | for _, svc := range oldSvcRefs {
130 | if !slices.Contains(newSvcRefs, svc) {
131 | targetRefs = append(targetRefs, svc)
132 | }
133 | }
134 | for _, svc := range newSvcRefs {
135 | if !slices.Contains(oldSvcRefs, svc) {
136 | targetRefs = append(targetRefs, svc)
137 | }
138 | }
139 |
140 | for _, svc := range targetRefs {
141 | h.svcEventChan <- event.TypedGenericEvent[*corev1.Service]{
142 | Object: &corev1.Service{
143 | ObjectMeta: metav1.ObjectMeta{
144 | Namespace: mciNew.Namespace,
145 | Name: svc,
146 | }}}
147 | }
148 | }
149 |
150 | func (h *multiClusterIngressEventHandler) Delete(_ context.Context, e event.TypedDeleteEvent[*networkingv1alpha1.MultiClusterIngress], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
151 | if !util.CheckIngressClassMatched(h.ctx, h.client, e.Object, h.ingClassName) {
152 | return
153 | }
154 | h.enqueueImpactedService(e.Object)
155 | }
156 |
157 | func (h *multiClusterIngressEventHandler) Generic(_ context.Context, _ event.TypedGenericEvent[*networkingv1alpha1.MultiClusterIngress], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
158 | }
159 |
160 | func (h *multiClusterIngressEventHandler) enqueueImpactedService(mci *networkingv1alpha1.MultiClusterIngress) {
161 | svcRefs := indexes.BuildServiceRefIndexes(mci)
162 | for _, svc := range svcRefs {
163 | h.svcEventChan <- event.TypedGenericEvent[*corev1.Service]{
164 | Object: &corev1.Service{
165 | ObjectMeta: metav1.ObjectMeta{
166 | Namespace: mci.Namespace,
167 | Name: svc,
168 | }}}
169 | }
170 | }
171 |
172 | func newMultiClusterServiceEventHandler(
173 | ctx context.Context,
174 | client client.Client,
175 | svcEventChan chan<- event.TypedGenericEvent[*corev1.Service],
176 | ) handler.TypedEventHandler[*networkingv1alpha1.MultiClusterService, reconcile.Request] {
177 | return &multiClusterServiceEventHandler{
178 | ctx: ctx,
179 | client: client,
180 | svcEventChan: svcEventChan,
181 | }
182 | }
183 |
184 | var _ handler.TypedEventHandler[*networkingv1alpha1.MultiClusterService, reconcile.Request] = (*multiClusterServiceEventHandler)(nil)
185 |
186 | type multiClusterServiceEventHandler struct {
187 | ctx context.Context
188 | client client.Client
189 | svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]
190 | }
191 |
192 | func (h *multiClusterServiceEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*networkingv1alpha1.MultiClusterService], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
193 | h.enqueueImpactedService(e.Object.GetNamespace(), e.Object.GetName())
194 | }
195 |
196 | func (h *multiClusterServiceEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*networkingv1alpha1.MultiClusterService], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
197 | mcsOld := e.ObjectOld
198 | mcsNew := e.ObjectNew
199 |
200 | // Only care about the update events below:
201 | if equality.Semantic.DeepEqual(mcsOld.Annotations, mcsNew.Annotations) &&
202 | equality.Semantic.DeepEqual(mcsOld.Spec.Types, mcsNew.Spec.Types) &&
203 | equality.Semantic.DeepEqual(mcsOld.Spec.Ports, mcsNew.Spec.Ports) {
204 | return
205 | }
206 |
207 | h.enqueueImpactedService(mcsNew.Namespace, mcsNew.Name)
208 | }
209 |
210 | func (h *multiClusterServiceEventHandler) Delete(_ context.Context, e event.TypedDeleteEvent[*networkingv1alpha1.MultiClusterService], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
211 | h.enqueueImpactedService(e.Object.GetNamespace(), e.Object.GetName())
212 | }
213 |
214 | func (h *multiClusterServiceEventHandler) Generic(_ context.Context, _ event.TypedGenericEvent[*networkingv1alpha1.MultiClusterService], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
215 | }
216 |
217 | func (h *multiClusterServiceEventHandler) enqueueImpactedService(namespace, name string) {
218 | h.svcEventChan <- event.TypedGenericEvent[*corev1.Service]{
219 | Object: &corev1.Service{
220 | ObjectMeta: metav1.ObjectMeta{
221 | Namespace: namespace,
222 | Name: name,
223 | }}}
224 | }
225 |
226 | func newResourceBindingEventHandler(svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]) handler.TypedEventHandler[*workv1alpha1.ResourceBinding, reconcile.Request] {
227 | return &resourceBindingEventHandler{
228 | svcEventChan: svcEventChan,
229 | }
230 | }
231 |
232 | var _ handler.TypedEventHandler[*workv1alpha1.ResourceBinding, reconcile.Request] = (*resourceBindingEventHandler)(nil)
233 |
234 | type resourceBindingEventHandler struct {
235 | svcEventChan chan<- event.TypedGenericEvent[*corev1.Service]
236 | }
237 |
238 | func (h *resourceBindingEventHandler) Create(_ context.Context, e event.TypedCreateEvent[*workv1alpha1.ResourceBinding], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
239 | // The distribution feature involves directly creating rb objects,
240 | // so it is necessary to care about the rb creation event.
241 | if e.Object.Spec.Resource.Kind != "Service" {
242 | return
243 | }
244 | h.svcEventChan <- event.TypedGenericEvent[*corev1.Service]{
245 | Object: &corev1.Service{
246 | ObjectMeta: metav1.ObjectMeta{
247 | Namespace: e.Object.Spec.Resource.Namespace,
248 | Name: e.Object.Spec.Resource.Name,
249 | }}}
250 | }
251 |
252 | func (h *resourceBindingEventHandler) Update(_ context.Context, e event.TypedUpdateEvent[*workv1alpha1.ResourceBinding], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
253 | rbOld := e.ObjectOld
254 | rbNew := e.ObjectNew
255 |
256 | resource := rbNew.Spec.Resource
257 | if resource.Kind != "Service" {
258 | return
259 | }
260 |
261 | scheduleResultOld := getClusterResults(rbOld.Spec)
262 | scheduleResultNew := getClusterResults(rbNew.Spec)
263 | if equality.Semantic.DeepEqual(scheduleResultOld, scheduleResultNew) {
264 | return
265 | }
266 |
267 | h.svcEventChan <- event.TypedGenericEvent[*corev1.Service]{
268 | Object: &corev1.Service{
269 | ObjectMeta: metav1.ObjectMeta{
270 | Namespace: resource.Namespace,
271 | Name: resource.Name,
272 | }}}
273 | }
274 |
275 | func (h *resourceBindingEventHandler) Delete(_ context.Context, _ event.TypedDeleteEvent[*workv1alpha1.ResourceBinding], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
276 | // The deletion event of the resourceBinding will be
277 | // processed by the deletion event of service.
278 | }
279 |
280 | func (h *resourceBindingEventHandler) Generic(_ context.Context, _ event.TypedGenericEvent[*workv1alpha1.ResourceBinding], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) {
281 | }
282 |
--------------------------------------------------------------------------------
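In the multiClusterIngressEventHandler.Update above, only Services that were added to or removed from the MCI's rules are re-enqueued, i.e. the symmetric difference of the old and new service references. The slice-based loops can equivalently be expressed with the apimachinery sets package; a small sketch of that alternative (not the repository's code):

    package example

    import (
    	"k8s.io/apimachinery/pkg/util/sets"
    )

    // changedServiceRefs returns the service names referenced by exactly one of
    // the two MultiClusterIngress revisions, i.e. the Services whose ServiceExport
    // propagation has to be reconsidered.
    func changedServiceRefs(oldRefs, newRefs []string) []string {
    	oldSet := sets.New(oldRefs...)
    	newSet := sets.New(newRefs...)
    	return oldSet.Difference(newSet).Union(newSet.Difference(oldSet)).UnsortedList()
    }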
/pkg/controllers/serviceexportpropagation/serviceexport_propagation_controller.go:
--------------------------------------------------------------------------------
1 | package serviceexportpropagation
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
8 | policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
9 | workv1alpha1 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
10 | "github.com/karmada-io/karmada/pkg/sharedcli/ratelimiterflag"
11 | corev1 "k8s.io/api/core/v1"
12 | "k8s.io/apimachinery/pkg/api/equality"
13 | apierrors "k8s.io/apimachinery/pkg/api/errors"
14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
15 | "k8s.io/apimachinery/pkg/types"
16 | "k8s.io/apimachinery/pkg/util/sets"
17 | "k8s.io/client-go/tools/record"
18 | "k8s.io/klog/v2"
19 | controllerruntime "sigs.k8s.io/controller-runtime"
20 | "sigs.k8s.io/controller-runtime/pkg/client"
21 | "sigs.k8s.io/controller-runtime/pkg/controller"
22 | "sigs.k8s.io/controller-runtime/pkg/event"
23 | "sigs.k8s.io/controller-runtime/pkg/source"
24 | mcsv1alpha1 "sigs.k8s.io/mcs-api/pkg/apis/v1alpha1"
25 |
26 | "github.com/karmada-io/multicluster-cloud-provider/pkg/controllers/indexes"
27 | "github.com/karmada-io/multicluster-cloud-provider/pkg/util"
28 | )
29 |
30 | // ControllerName is the controller name that will be used when reporting events.
31 | const ControllerName = "serviceexport-propagation-controller"
32 |
33 | const propagationPolicyPrefix = "propagate-service-"
34 |
35 | // Controller will propagate ServiceExport resource into member clusters.
36 | type Controller struct {
37 | client.Client
38 | EventRecorder record.EventRecorder
39 | RateLimiterOptions ratelimiterflag.Options
40 | ProviderClassName string
41 | }
42 |
43 | // Reconcile performs a full reconciliation for the Service object and
44 | // propagates ServiceExport resource into member clusters.
45 | func (c *Controller) Reconcile(ctx context.Context, req controllerruntime.Request) (controllerruntime.Result, error) {
46 | klog.V(4).InfoS("Reconciling Service", "namespace", req.Namespace, "name", req.Name)
47 |
48 | svc := &corev1.Service{}
49 | if err := c.Client.Get(ctx, req.NamespacedName, svc); err != nil {
50 | if apierrors.IsNotFound(err) {
51 | return controllerruntime.Result{}, nil
52 | }
53 | return controllerruntime.Result{}, err
54 | }
55 |
56 | if !svc.DeletionTimestamp.IsZero() {
57 | return controllerruntime.Result{}, nil
58 | }
59 |
60 | needEnsure, err := c.needEnsureServiceExport(ctx, svc.Namespace, svc.Name)
61 | if err != nil {
62 | return controllerruntime.Result{}, err
63 | }
64 |
65 | if needEnsure {
66 | return c.ensureServiceExport(ctx, svc)
67 | }
68 | return c.removeServiceExport(ctx, svc)
69 | }
70 |
71 | func (c *Controller) needEnsureServiceExport(ctx context.Context, svcNamespace, svcName string) (bool, error) {
72 | mciList := &networkingv1alpha1.MultiClusterIngressList{}
73 | if err := c.Client.List(ctx, mciList,
74 | client.InNamespace(svcNamespace),
75 | client.MatchingFields{indexes.IndexKeyServiceRefName: svcName}); err != nil {
76 | klog.Errorf("failed to fetch multiclusteringresses")
77 | return false, err
78 | }
79 |
80 | if len(mciList.Items) > 0 {
81 | return true, nil
82 | }
83 |
84 | mcs := &networkingv1alpha1.MultiClusterService{}
85 | if err := c.Client.Get(ctx, types.NamespacedName{Namespace: svcNamespace, Name: svcName}, mcs); err != nil {
86 | if apierrors.IsNotFound(err) {
87 | return false, nil
88 | }
89 | klog.Errorf("failed to get multiclusterservice")
90 | return false, err
91 | }
92 | return util.MCSContainLoadBalanceType(mcs), nil
93 | }
94 |
95 | func (c *Controller) ensureServiceExport(ctx context.Context, svc *corev1.Service) (controllerruntime.Result, error) {
96 | // 1. make sure the serviceExport exists
97 | svcExport := &mcsv1alpha1.ServiceExport{}
98 | err := c.Client.Get(ctx, types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}, svcExport)
99 | if err != nil && !apierrors.IsNotFound(err) {
100 | klog.ErrorS(err, "failed to get serviceExport", "namespace", svc.Namespace, "name", svc.Name)
101 | return controllerruntime.Result{}, err
102 | }
103 |
104 | // 2. if the serviceExport does not exist, create it
105 | if apierrors.IsNotFound(err) {
106 | svcExport = createServiceExportTemplate(svc)
107 | err = c.Client.Create(ctx, svcExport)
108 | if err != nil {
109 | klog.ErrorS(err, "failed to create serviceExport", "namespace", svc.Namespace, "name", svc.Name)
110 | return controllerruntime.Result{}, err
111 | }
112 | klog.V(4).InfoS("success to create serviceExport", "namespace", svc.Namespace, "name", svc.Name)
113 | }
114 |
115 | // 3. get service's scheduled result
116 | clusters, err := c.getBindingClusterResults(ctx, svc)
117 | if err != nil {
118 | return controllerruntime.Result{}, err
119 | }
120 | if len(clusters) == 0 {
121 | klog.V(4).InfoS("service's scheduled result length is zero", "namespace", svc.Namespace, "name", svc.Name)
122 | return controllerruntime.Result{}, nil
123 | }
124 | klog.V(4).InfoS("service's cluster results", "namespace", svc.Namespace, "name", svc.Name, "clusters", clusters)
125 |
126 | // 4. make sure the propagationPolicy exists
127 | policyName := fmt.Sprintf("%s%s", propagationPolicyPrefix, svc.Name)
128 | policy := &policyv1alpha1.PropagationPolicy{}
129 | err = c.Client.Get(ctx, types.NamespacedName{Namespace: svc.Namespace, Name: policyName}, policy)
130 | if err != nil && !apierrors.IsNotFound(err) {
131 | klog.ErrorS(err, "failed to get policy", "namespace", svc.Namespace, "name", policyName)
132 | return controllerruntime.Result{}, err
133 | }
134 |
135 | // 5. if the propagationPolicy does not exist, create it
136 | if apierrors.IsNotFound(err) {
137 | policy = createPropagationPolicyTemplate(policyName, svc, clusters)
138 | err = c.Client.Create(ctx, policy)
139 | if err != nil {
140 | klog.ErrorS(err, "failed to create policy", "namespace", svc.Namespace, "name", policyName)
141 | return controllerruntime.Result{}, err
142 | }
143 | klog.V(4).InfoS("success to create policy", "namespace", svc.Namespace, "name", policyName)
144 | return controllerruntime.Result{}, nil
145 | }
146 |
147 | // 6. the propagationPolicy already exists, update it if needed
148 | if equality.Semantic.DeepEqual(policy.Spec.Placement.ClusterAffinity.ClusterNames, clusters) {
149 | klog.V(4).InfoS("there is no need to update policy", "namespace", svc.Namespace, "name", policyName)
150 | return controllerruntime.Result{}, nil
151 | }
152 | policyCopy := policy.DeepCopy()
153 | policyCopy.Spec.Placement.ClusterAffinity.ClusterNames = clusters
154 | err = c.Client.Update(ctx, policyCopy)
155 | if err != nil {
156 | klog.ErrorS(err, "failed to update policy", "namespace", svc.Namespace, "name", policyName)
157 | return controllerruntime.Result{}, err
158 | }
159 | klog.V(4).InfoS("success to update policy", "namespace", svc.Namespace, "name", policyName)
160 | return controllerruntime.Result{}, nil
161 | }
162 |
163 | func createServiceExportTemplate(svc *corev1.Service) *mcsv1alpha1.ServiceExport {
164 | return &mcsv1alpha1.ServiceExport{
165 | ObjectMeta: metav1.ObjectMeta{
166 | Namespace: svc.Namespace,
167 | Name: svc.Name,
168 | OwnerReferences: []metav1.OwnerReference{
169 | *metav1.NewControllerRef(svc, corev1.SchemeGroupVersion.WithKind("Service")),
170 | },
171 | },
172 | }
173 | }
174 |
175 | func createPropagationPolicyTemplate(policyName string, svc *corev1.Service, clusters []string) *policyv1alpha1.PropagationPolicy {
176 | return &policyv1alpha1.PropagationPolicy{
177 | ObjectMeta: metav1.ObjectMeta{
178 | Namespace: svc.Namespace,
179 | Name: policyName,
180 | OwnerReferences: []metav1.OwnerReference{
181 | *metav1.NewControllerRef(svc, corev1.SchemeGroupVersion.WithKind("Service")),
182 | },
183 | },
184 | Spec: policyv1alpha1.PropagationSpec{
185 | ResourceSelectors: []policyv1alpha1.ResourceSelector{{
186 | APIVersion: mcsv1alpha1.GroupVersion.String(),
187 | Kind: "ServiceExport",
188 | Name: svc.Name,
189 | }},
190 | Placement: policyv1alpha1.Placement{
191 | ClusterAffinity: &policyv1alpha1.ClusterAffinity{
192 | ClusterNames: clusters,
193 | }}},
194 | }
195 | }
196 |
197 | func (c *Controller) getBindingClusterResults(ctx context.Context, svc *corev1.Service) ([]string, error) {
198 | bindings := &workv1alpha1.ResourceBindingList{}
199 | err := c.Client.List(ctx, bindings, client.InNamespace(svc.Namespace))
200 | if err != nil {
201 | klog.ErrorS(err, "failed to list resourceBinding", "namespace", svc.Namespace)
202 | return nil, err
203 | }
204 |
205 | var targetBindingSpec workv1alpha1.ResourceBindingSpec
206 | for _, binding := range bindings.Items {
207 | resource := binding.Spec.Resource
208 | if resource.Kind == "Service" && resource.Name == svc.Name {
209 | targetBindingSpec = binding.Spec
210 | break
211 | }
212 | }
213 |
214 | return getClusterResults(targetBindingSpec), nil
215 | }
216 |
217 | func getClusterResults(rbSpec workv1alpha1.ResourceBindingSpec) []string {
218 | clusterResults := sets.NewString()
219 | for _, cluster := range rbSpec.Clusters {
220 | clusterResults.Insert(cluster.Name)
221 | }
222 | for _, requiredBy := range rbSpec.RequiredBy {
223 | for _, cluster := range requiredBy.Clusters {
224 | clusterResults.Insert(cluster.Name)
225 | }
226 | }
227 | return clusterResults.List()
228 | }
229 |
230 | func (c *Controller) removeServiceExport(ctx context.Context, svc *corev1.Service) (controllerruntime.Result, error) {
231 | err := c.deleteServiceExport(ctx, svc)
232 | if err != nil {
233 | return controllerruntime.Result{}, err
234 | }
235 |
236 | err = c.deletePropagationPolicy(ctx, svc)
237 | if err != nil {
238 | return controllerruntime.Result{}, err
239 | }
240 |
241 | return controllerruntime.Result{}, nil
242 | }
243 |
244 | func (c *Controller) deleteServiceExport(ctx context.Context, svc *corev1.Service) error {
245 | svcExport := &mcsv1alpha1.ServiceExport{}
246 | err := c.Client.Get(ctx, types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}, svcExport)
247 | if err != nil {
248 | if apierrors.IsNotFound(err) {
249 | return nil
250 | }
251 | klog.ErrorS(err, "failed to get serviceExport", "namespace", svc.Namespace, "name", svc.Name)
252 | return err
253 | }
254 |
255 | err = c.Client.Delete(ctx, svcExport)
256 | if err != nil && !apierrors.IsNotFound(err) {
257 | klog.ErrorS(err, "failed to delete serviceExport", "namespace", svc.Namespace, "name", svc.Name)
258 | return err
259 | }
260 | klog.V(4).InfoS("success to delete serviceExport", "namespace", svc.Namespace, "name", svc.Name)
261 | return nil
262 | }
263 |
264 | func (c *Controller) deletePropagationPolicy(ctx context.Context, svc *corev1.Service) error {
265 | policyName := fmt.Sprintf("%s%s", propagationPolicyPrefix, svc.Name)
266 | policy := &policyv1alpha1.PropagationPolicy{}
267 | err := c.Client.Get(ctx, types.NamespacedName{Namespace: svc.Namespace, Name: policyName}, policy)
268 | if err != nil {
269 | if apierrors.IsNotFound(err) {
270 | return nil
271 | }
272 | klog.ErrorS(err, "failed to get policy", "namespace", svc.Namespace, "name", policyName)
273 | return err
274 | }
275 |
276 | err = c.Client.Delete(ctx, policy)
277 | if err != nil && !apierrors.IsNotFound(err) {
278 | klog.ErrorS(err, "failed to delete policy", "namespace", svc.Namespace, "name", policyName)
279 | return err
280 | }
281 | klog.V(4).InfoS("success to delete policy", "namespace", svc.Namespace, "name", policyName)
282 | return nil
283 | }
284 |
285 | // SetupWithManager creates the controller and registers it with the controller manager.
286 | func (c *Controller) SetupWithManager(ctx context.Context, mgr controllerruntime.Manager) error {
287 | serviceExportController, err := controller.New(ControllerName, mgr,
288 | controller.Options{
289 | Reconciler: c,
290 | RateLimiter: ratelimiterflag.DefaultControllerRateLimiter[controllerruntime.Request](c.RateLimiterOptions),
291 | })
292 | if err != nil {
293 | return err
294 | }
295 |
296 | if err = c.setupWatches(ctx, serviceExportController, mgr); err != nil {
297 | return err
298 | }
299 |
300 | return nil
301 | }
302 |
303 | func (c *Controller) setupWatches(ctx context.Context, serviceExportController controller.Controller, mgr controllerruntime.Manager) error {
304 | svcEventChan := make(chan event.TypedGenericEvent[*corev1.Service])
305 |
306 | svcEventHandler := newServiceEventHandler(ctx, c.Client)
307 | mciEventHandler := newMultiClusterIngressEventHandler(ctx, c.Client, svcEventChan, c.ProviderClassName)
308 | mcsEventHandler := newMultiClusterServiceEventHandler(ctx, c.Client, svcEventChan)
309 | rbEventHandler := newResourceBindingEventHandler(svcEventChan)
310 |
311 | if err := serviceExportController.Watch(source.Kind[*corev1.Service](mgr.GetCache(), &corev1.Service{}, svcEventHandler)); err != nil {
312 | return err
313 | }
314 | if err := serviceExportController.Watch(source.Channel[*corev1.Service](svcEventChan, svcEventHandler)); err != nil {
315 | return err
316 | }
317 | if err := serviceExportController.Watch(source.Kind[*networkingv1alpha1.MultiClusterIngress](mgr.GetCache(), &networkingv1alpha1.MultiClusterIngress{}, mciEventHandler)); err != nil {
318 | return err
319 | }
320 | if err := serviceExportController.Watch(source.Kind[*networkingv1alpha1.MultiClusterService](mgr.GetCache(), &networkingv1alpha1.MultiClusterService{}, mcsEventHandler)); err != nil {
321 | return err
322 | }
323 | if err := serviceExportController.Watch(source.Kind[*workv1alpha1.ResourceBinding](mgr.GetCache(), &workv1alpha1.ResourceBinding{}, rbEventHandler)); err != nil {
324 | return err
325 | }
326 | return nil
327 | }
328 |
--------------------------------------------------------------------------------
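Note: the propagation controller above targets its PropagationPolicy at the union of the ResourceBinding's scheduled clusters and every cluster listed under requiredBy. The standalone sketch below (hypothetical cluster names, not part of the repository) illustrates the deduplicate-and-sort behaviour getClusterResults gets from the sets helper:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Stand-ins for rbSpec.Clusters and rbSpec.RequiredBy[*].Clusters.
	scheduled := []string{"member2", "member1"}
	requiredBy := [][]string{{"member1"}, {"member3"}}

	results := sets.NewString(scheduled...)
	for _, clusters := range requiredBy {
		results.Insert(clusters...)
	}
	// The union is returned sorted: [member1 member2 member3]
	fmt.Println(results.List())
}
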
/pkg/util/ingressclass.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import (
4 | "context"
5 |
6 | networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
7 | "github.com/pkg/errors"
8 | networkingv1 "k8s.io/api/networking/v1"
9 | "k8s.io/apimachinery/pkg/types"
10 | "k8s.io/klog/v2"
11 | "sigs.k8s.io/controller-runtime/pkg/client"
12 | )
13 |
14 | // GetDefaultIngressClass returns the default IngressClass from the list of IngressClasses.
15 | // If multiple IngressClasses are marked as the default, it returns an error.
16 | // If no IngressClass is marked as the default, it returns an empty string.
17 | func GetDefaultIngressClass(ctx context.Context, c client.Client) (string, error) {
18 | var defaultClass string
19 | var defaultClassFound bool
20 |
21 | ingClassList := &networkingv1.IngressClassList{}
22 | if err := c.List(ctx, ingClassList); err != nil {
23 | return "", err
24 | }
25 |
26 | for _, ingClass := range ingClassList.Items {
27 | if ingClass.Annotations[networkingv1.AnnotationIsDefaultIngressClass] != "true" {
28 | continue
29 | }
30 | if defaultClassFound {
31 | return "", errors.Errorf("multiple default IngressClasses found")
32 | }
33 | defaultClass = ingClass.Name
34 | defaultClassFound = true
35 | }
36 | return defaultClass, nil
37 | }
38 |
39 | // CheckIngressClassMatched checks whether the .spec.controller value of the IngressClass
40 | // referenced by the input mci object matches the input ingClassName value.
41 | func CheckIngressClassMatched(ctx context.Context, c client.Client, mci *networkingv1alpha1.MultiClusterIngress, ingClassName string) bool {
42 | mciIngressClassName := mci.Spec.IngressClassName
43 |
44 | if mciIngressClassName == nil {
45 | defaultIngressClassName, err := GetDefaultIngressClass(ctx, c)
46 | if err != nil {
47 | klog.ErrorS(err, "failed to get default IngressClass")
48 | return false
49 | }
50 | if defaultIngressClassName == "" {
51 | return false
52 | }
53 | mciIngressClassName = &defaultIngressClassName
54 | }
55 |
56 | ingClassKey := types.NamespacedName{Name: *mciIngressClassName}
57 | ingClass := &networkingv1.IngressClass{}
58 | if err := c.Get(ctx, ingClassKey, ingClass); err != nil {
59 | klog.Errorf("Failed to get target IngressClass %s", ingClassKey.String())
60 | return false
61 | }
62 |
63 | return ingClass.Spec.Controller == ingClassName
64 | }
65 |
--------------------------------------------------------------------------------
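Note: both helpers above inspect plain IngressClass objects. A minimal sketch (object name and controller string are illustrative) of the shape they expect — GetDefaultIngressClass looks for the well-known default-class annotation, and CheckIngressClassMatched compares .spec.controller against the provider's class name:

package main

import (
	"fmt"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ingClass := networkingv1.IngressClass{
		ObjectMeta: metav1.ObjectMeta{
			Name: "fake-ingressclass", // illustrative name
			Annotations: map[string]string{
				// Resolves to "ingressclass.kubernetes.io/is-default-class".
				networkingv1.AnnotationIsDefaultIngressClass: "true",
			},
		},
		Spec: networkingv1.IngressClassSpec{
			// Compared against the ingClassName argument of CheckIngressClassMatched.
			Controller: "example.io/fake-controller", // illustrative controller value
		},
	}
	fmt.Println(ingClass.Annotations[networkingv1.AnnotationIsDefaultIngressClass] == "true")
}
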
/pkg/util/multiclusterservice.go:
--------------------------------------------------------------------------------
1 | package util
2 |
3 | import networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
4 |
5 | // MCSContainLoadBalanceType checks whether the MultiClusterService contains the LoadBalancer exposure type.
6 | func MCSContainLoadBalanceType(mcs *networkingv1alpha1.MultiClusterService) bool {
7 | for _, t := range mcs.Spec.Types {
8 | if t == networkingv1alpha1.ExposureTypeLoadBalancer {
9 | return true
10 | }
11 | }
12 | return false
13 | }
14 |
--------------------------------------------------------------------------------
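Note: a minimal sketch (hypothetical object; the spec type name is assumed from the Karmada networking v1alpha1 API) of the input MCSContainLoadBalanceType looks for, i.e. a MultiClusterService whose .spec.types includes the LoadBalancer exposure type:

package main

import (
	"fmt"

	networkingv1alpha1 "github.com/karmada-io/karmada/pkg/apis/networking/v1alpha1"
)

func main() {
	mcs := &networkingv1alpha1.MultiClusterService{
		Spec: networkingv1alpha1.MultiClusterServiceSpec{
			Types: []networkingv1alpha1.ExposureType{
				networkingv1alpha1.ExposureTypeLoadBalancer,
			},
		},
	}

	// Mirrors the loop in MCSContainLoadBalanceType.
	contains := false
	for _, t := range mcs.Spec.Types {
		if t == networkingv1alpha1.ExposureTypeLoadBalancer {
			contains = true
		}
	}
	fmt.Println(contains) // true
}
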
/plugins.go:
--------------------------------------------------------------------------------
1 | package multiclusterprovider
2 |
3 | import (
4 | "fmt"
5 | "io"
6 | "os"
7 | "sync"
8 |
9 | "k8s.io/klog/v2"
10 | )
11 |
12 | // Factory is a function that returns a multiclusterprovider.Interface.
13 | // The config parameter provides an io.Reader handler to the factory in
14 | // order to load specific configurations. If no configuration is provided
15 | // the parameter is nil.
16 | type Factory func(config io.Reader) (Interface, error)
17 |
18 | var (
19 | providersMutex sync.Mutex
20 | providers = make(map[string]Factory)
21 | )
22 |
23 | // RegisterMultiClusterProvider registers a multiclusterprovider.Factory by name.
24 | // This is expected to happen during app startup.
25 | func RegisterMultiClusterProvider(name string, cloudFactory Factory) {
26 | providersMutex.Lock()
27 | defer providersMutex.Unlock()
28 |
29 | if _, found := providers[name]; found {
30 | klog.Fatalf("Multicluster provider %q was registered twice", name)
31 | }
32 | klog.V(1).Infof("Registered multicluster provider %q", name)
33 | providers[name] = cloudFactory
34 | }
35 |
36 | // InitMultiClusterProvider creates an instance of the named multicluster provider.
37 | func InitMultiClusterProvider(name string, configFilePath string) (Interface, error) {
38 | var provider Interface
39 | var err error
40 |
41 | if name == "" {
42 | return nil, nil
43 | }
44 |
45 | if configFilePath == "" {
46 | provider, err = createMultiClusterProvider(name, nil)
47 | } else {
48 | var config *os.File
49 | config, err = os.Open(configFilePath)
50 | if err != nil {
51 | klog.Fatalf("Couldn't open multicluster provider configuration %q: %#v", configFilePath, err)
52 | }
53 |
54 | defer func() {
55 | _ = config.Close()
56 | }()
57 | provider, err = createMultiClusterProvider(name, config)
58 | }
59 |
60 | if err != nil {
61 | return nil, fmt.Errorf("could not init multicluster provider %q: %w", name, err)
62 | }
63 | if provider == nil {
64 | return nil, fmt.Errorf("unknown multicluster provider %q", name)
65 | }
66 | return provider, nil
67 | }
68 |
69 | // createMultiClusterProvider creates an instance of the named multicluster provider,
70 | // or nil if the name is unknown. The return error is only used if the named provider
71 | // was known but failed to initialize. The config parameter specifies the io.Reader
72 | // handler of the configuration file for the multicluster provider, or nil for no configuration.
73 | func createMultiClusterProvider(name string, config io.Reader) (Interface, error) {
74 | providersMutex.Lock()
75 | defer providersMutex.Unlock()
76 |
77 | f, found := providers[name]
78 | if !found {
79 | return nil, nil
80 | }
81 | return f(config)
82 | }
83 |
--------------------------------------------------------------------------------
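Note: a minimal sketch of the registration flow, written as if it lived in this multiclusterprovider package; the "example" provider name and stub type are hypothetical (compare the fake/ package in this repository for the in-tree provider). In practice registration typically happens from an init() function in the provider package, which the controller-manager pulls in with a blank import:

package multiclusterprovider

import "io"

// exampleProvider is a hypothetical stand-in; a real provider implements Interface.
type exampleProvider struct {
	Interface
}

// registerExample registers a factory by name; InitMultiClusterProvider("example", "")
// would later resolve it and call the factory with a nil config reader.
func registerExample() {
	RegisterMultiClusterProvider("example", func(config io.Reader) (Interface, error) {
		// config is nil when no configuration file path was supplied.
		return exampleProvider{}, nil
	})
}
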