├── .github ├── dependabot.yml └── workflows │ ├── build.yaml │ ├── codeql-analysis.yml │ ├── dependabot-auto-approve.yaml │ ├── dependabot-auto-merge.yaml │ └── semgrep.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── backoff ├── backoff.go ├── retry.go └── retry_test.go ├── config.go ├── config_test.go ├── deploy ├── example │ ├── README.md │ ├── kube-system │ │ └── kustomization.yaml │ └── namespaced │ │ ├── deployment-patch.yaml │ │ ├── kustomization.yaml │ │ ├── resources │ │ └── semaphore-service-mirror-config.json │ │ └── secrets │ │ └── remote-kube-token └── kustomize │ ├── cluster │ ├── kustomization.yaml │ └── remote-auth.yaml │ └── namespaced │ ├── deployment.yaml │ ├── kustomization.yaml │ └── rbac.yaml ├── global_runner.go ├── global_runner_test.go ├── globalservice.go ├── globalservice_test.go ├── go.mod ├── go.sum ├── kube ├── client.go ├── common.go ├── endpoints_watcher.go ├── endpointslice_watcher.go └── service_watcher.go ├── log └── logger.go ├── main.go ├── metrics ├── kube_client.go ├── kube_watcher.go └── queue.go ├── mirror_runner.go ├── mirror_runner_test.go ├── queue.go ├── runner.go ├── test_utils.go ├── utils.go └── utils_test.go /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # See GitHub's docs for more information on this file: 2 | # https://docs.github.com/en/free-pro-team@latest/github/administering-a-repository/configuration-options-for-dependency-updates 3 | version: 2 4 | updates: 5 | # Maintain dependencies for GitHub Actions 6 | - package-ecosystem: "github-actions" 7 | directory: "/" 8 | schedule: 9 | interval: "monthly" 10 | 11 | # Maintain dependencies for Go modules 12 | - package-ecosystem: "gomod" 13 | directory: "/" 14 | schedule: 15 | interval: "monthly" 16 | -------------------------------------------------------------------------------- /.github/workflows/build.yaml: 
-------------------------------------------------------------------------------- 1 | name: build 2 | 3 | on: 4 | push: 5 | branches: 6 | - "*" 7 | tags: 8 | - "v*" 9 | pull_request: 10 | branches: 11 | - "master" 12 | 13 | env: 14 | REGISTRY: quay.io 15 | IMAGE_NAME: ${{ github.repository }} 16 | 17 | jobs: 18 | docker: 19 | runs-on: ubuntu-latest 20 | steps: 21 | - name: Checkout 22 | uses: actions/checkout@v4 23 | - name: Set up QEMU 24 | uses: docker/setup-qemu-action@v3 25 | - name: Set up Docker Buildx 26 | uses: docker/setup-buildx-action@v3 27 | - name: Extract metadata (tags, labels) for Docker 28 | id: meta 29 | uses: docker/metadata-action@v5 30 | with: 31 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 32 | - name: Login to Quay.io Container Registry 33 | if: github.actor != 'dependabot[bot]' 34 | uses: docker/login-action@v3 35 | with: 36 | registry: quay.io 37 | username: utilitywarehouse+drone_ci 38 | password: ${{ secrets.SYSTEM_QUAY_TOKEN }} 39 | - name: Build and push Docker image 40 | if: github.actor != 'dependabot[bot]' 41 | uses: docker/build-push-action@v6 42 | with: 43 | context: . 44 | push: true 45 | tags: ${{ steps.meta.outputs.tags }} 46 | labels: ${{ steps.meta.outputs.labels }} 47 | -------------------------------------------------------------------------------- /.github/workflows/codeql-analysis.yml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 
11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: [master] 17 | pull_request: 18 | # The branches below must be a subset of the branches above 19 | branches: [master] 20 | schedule: 21 | - cron: "30 22 * * 0" 22 | 23 | jobs: 24 | analyze: 25 | name: Analyze 26 | runs-on: ubuntu-latest 27 | permissions: 28 | actions: read 29 | contents: read 30 | security-events: write 31 | 32 | strategy: 33 | fail-fast: false 34 | matrix: 35 | language: ["go"] 36 | # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] 37 | # Learn more: 38 | # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed 39 | 40 | steps: 41 | - name: Checkout repository 42 | uses: actions/checkout@v4 43 | 44 | # Initializes the CodeQL tools for scanning. 45 | - name: Initialize CodeQL 46 | uses: github/codeql-action/init@v3 47 | with: 48 | languages: ${{ matrix.language }} 49 | # If you wish to specify custom queries, you can do so here or in a config file. 50 | # By default, queries listed here will override any specified in a config file. 51 | # Prefix the list here with "+" to use these queries and those in the config file. 52 | # queries: ./path/to/local/query, your-org/your-repo/queries@main 53 | 54 | # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). 55 | # If this step fails, then you should remove it and run the build manually (see below) 56 | - name: Autobuild 57 | uses: github/codeql-action/autobuild@v3 58 | 59 | # ℹ️ Command-line programs to run using the OS shell. 
60 | # 📚 https://git.io/JvXDl 61 | 62 | # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines 63 | # and modify them (or add more) to build your code if your project 64 | # uses a compiled language 65 | 66 | #- run: | 67 | # make bootstrap 68 | # make release 69 | 70 | - name: Perform CodeQL Analysis 71 | uses: github/codeql-action/analyze@v3 72 | -------------------------------------------------------------------------------- /.github/workflows/dependabot-auto-approve.yaml: -------------------------------------------------------------------------------- 1 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#approve-a-pull-request 2 | name: Dependabot auto-approve 3 | on: pull_request 4 | 5 | permissions: 6 | pull-requests: write 7 | 8 | jobs: 9 | dependabot: 10 | runs-on: ubuntu-latest 11 | if: ${{ github.actor == 'dependabot[bot]' }} 12 | steps: 13 | - name: Dependabot metadata 14 | id: metadata 15 | uses: dependabot/fetch-metadata@v2.4.0 16 | with: 17 | github-token: "${{ secrets.GITHUB_TOKEN }}" 18 | - name: Approve a PR 19 | run: gh pr review --approve "$PR_URL" 20 | env: 21 | PR_URL: ${{github.event.pull_request.html_url}} 22 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 23 | -------------------------------------------------------------------------------- /.github/workflows/dependabot-auto-merge.yaml: -------------------------------------------------------------------------------- 1 | # https://docs.github.com/en/code-security/dependabot/working-with-dependabot/automating-dependabot-with-github-actions#enable-auto-merge-on-a-pull-request 2 | name: Dependabot auto-merge 3 | on: pull_request 4 | 5 | permissions: 6 | pull-requests: write 7 | contents: write 8 | 9 | jobs: 10 | dependabot: 11 | runs-on: ubuntu-latest 12 | if: ${{ github.actor == 'dependabot[bot]' }} 13 | steps: 14 | - name: Dependabot metadata 15 | id: metadata 16 | uses: 
dependabot/fetch-metadata@v2.4.0 17 | with: 18 | github-token: "${{ secrets.GITHUB_TOKEN }}" 19 | - name: Enable auto-merge for Dependabot PRs 20 | run: gh pr merge --auto --merge "$PR_URL" 21 | env: 22 | PR_URL: ${{github.event.pull_request.html_url}} 23 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}} 24 | -------------------------------------------------------------------------------- /.github/workflows/semgrep.yml: -------------------------------------------------------------------------------- 1 | 2 | # Name of this GitHub Actions workflow. 3 | name: Semgrep 4 | 5 | on: 6 | # Scan changed files in PRs (diff-aware scanning): 7 | pull_request: {} 8 | # Scan on-demand through GitHub Actions interface: 9 | workflow_dispatch: {} 10 | # Scan mainline branches and report all findings: 11 | push: 12 | branches: 13 | - main 14 | - master 15 | # Schedule the CI job (this method uses cron syntax): 16 | schedule: 17 | - cron: '30 14 * * *' 18 | # or whatever time works best for your team. 19 | 20 | jobs: 21 | semgrep: 22 | # User definable name of this GitHub Actions job. 23 | name: semgrep/ci 24 | # If you are self-hosting, change the following `runs-on` value: 25 | runs-on: ubuntu-latest 26 | 27 | container: 28 | # A Docker image with Semgrep installed. Do not change this. 29 | image: returntocorp/semgrep 30 | 31 | # Skip any PR created by dependabot to avoid permission issues: 32 | if: (github.actor != 'dependabot[bot]') 33 | 34 | steps: 35 | # Fetch project source with GitHub Actions Checkout. 36 | - uses: actions/checkout@v4 37 | # Run the "semgrep ci" command on the command line of the docker image. 
38 | - run: semgrep scan --config auto 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | semaphore-service-mirror 2 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.22-alpine AS build 2 | WORKDIR /go/src/github.com/utilitywarehouse/semaphore-service-mirror 3 | COPY . /go/src/github.com/utilitywarehouse/semaphore-service-mirror 4 | ENV CGO_ENABLED 0 5 | RUN go test ./... 6 | RUN go build -o /semaphore-service-mirror . 7 | 8 | FROM alpine:3.19 9 | COPY --from=build /semaphore-service-mirror /semaphore-service-mirror 10 | CMD [ "/semaphore-service-mirror" ] 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Utility Warehouse 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | 3 | release: 4 | @sd "newTag: master" "newTag: $(VERSION)" deploy/kustomize/namespaced/kustomization.yaml 5 | @git add deploy/kustomize/namespaced/kustomization.yaml 6 | @git commit -m "Release $(VERSION)" 7 | @git tag -m "Release $(VERSION)" -a $(VERSION) 8 | @sd "newTag: $(VERSION)" "newTag: master" deploy/kustomize/namespaced/kustomization.yaml 9 | @git add deploy/kustomize/namespaced/kustomization.yaml 10 | @git commit -m "Clean up release $(VERSION)" 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # semaphore-service-mirror 2 | 3 | Small app that watches for kubernetes services and endpoints in a target cluster 4 | and mirrors them in a local namespace. Can be used in conjunction with coredns 5 | in cases where pod networks are reachable between clusters and one needs to be 6 | able to route virtual services for remote pods. 
7 | 8 | ## Usage 9 | ``` 10 | Usage of ./semaphore-service-mirror: 11 | -config string 12 | (required)Path to the json config file 13 | -kube-config string 14 | Path of a kube config file, if not provided the app will try to get in cluster config 15 | -label-selector string 16 | Label of services and endpoints to watch and mirror 17 | -log-level string 18 | Log level (default "info") 19 | -mirror-ns string 20 | The namespace to create dummy mirror services in 21 | ``` 22 | 23 | You can set most flags via envvars instead, format: "SSM_FLAG_NAME". Example: 24 | `-config` can be set as `SSM_CONFIG` and `-kube-config` can be set as 25 | `SSM_KUBE_CONFIG`. 26 | 27 | If both are present, flags take precedence over envvars. 28 | 29 | The only mandatory flag is `-config` to point to a json formatted config file. 30 | Label selector and mirror namespace must also be set, but there is the option to 31 | do this via the json config (more details in the next section below). Flags will 32 | take precedence over static configuration from the file. 33 | 34 | ## Configuration file 35 | 36 | The operator expects a configuration file in json format. Here is a description 37 | of the configuration keys by scope: 38 | 39 | ### Global 40 | Contains configuration globally shared by all runners. 41 | 42 | * `globalSvcLabelSelector`: Labels used to select global services 43 | * `globalSvcRoutingStrategyLabel`: Labels used to instruct controller to try 44 | utilising Kubernetes topology aware hints to select local cluster targets 45 | first when routing global services. 46 | * `mirrorSvcLabelSelector`: Label used to select services to mirror 47 | * `mirrorNamespace`: Namespace used to locate/place mirrored objects 48 | * `serviceSync`: Whether to sync services on startup and delete records that 49 | cannot be located based on the label selector. 
Defaults to false 50 | 51 | ### Local Cluster 52 | Contains configuration needed to manage resources in the local cluster, where 53 | this operator runs. 54 | 55 | * `name`: A name for the local cluster 56 | * `zones`: A list of the availability zones for the local cluster. This will be 57 | used to allow topology aware routing for global services and the values should 58 | derive from kuberenetes nodes' `topology.kubernetes.io/zone` label. 59 | * `kubeConfigPath`: Path to a kube config file to access the local cluster. If 60 | not specified the operator will try to use in-cluster configuration with the 61 | pod's service account. 62 | 63 | ### Remote clusters 64 | Contains a list of keys to configure access to all remote cluster. Each list can 65 | include the following: 66 | 67 | * `name`: A name for the remote cluster 68 | * `kubeConfigPath`: Path to a kube config file to access the remote cluster. 69 | * `remoteAPIURL`: Address of the remote cluster API server 70 | * `remoteCAURL`: Address from where to fetch the public CA certificate to talk 71 | to the remote API server. 72 | * `remoteSATokenPiath`: Path to a service account token that will be used to 73 | access remote cluster resources. 74 | * `resyncPeriod`: Will trigger an `onUpdate` event for everything that is stored 75 | in the respective watchers cache. Defaults to 0 which equals disabled. 76 | * `servicePrefix`: How to prefix service names mirrored from that remote 77 | locally. 78 | 79 | Either `kubeConfigPath` or `remoteAPIURL`,`remoteCAURL` and `remoteSATokenPiath` 80 | should be set to be able to successfully create a client to talk to the remote 81 | cluster. 
82 | 83 | ### Example 84 | ``` 85 | { 86 | "global": { 87 | "globalSvcLabelSelector": "mirror.semaphore.uw.io/global-service=true", 88 | "globalSvcRoutingStrategyLabel": "mirror.semaphore.uw.io/global-service-routing-strategy=local-first", 89 | "mirrorSvcLabelSelector": "mirror.semaphore.uw.io/mirror-service=true", 90 | "mirrorNamespace": "semaphore", 91 | "serviceSync": true, 92 | "endpointSliceSync": true 93 | }, 94 | "localCluster": { 95 | "name": "local", 96 | "kubeConfigPath": "/path/to/local/kubeconfig" 97 | }, 98 | "remoteClusters": [ 99 | { 100 | "name": "clusterA", 101 | "remoteCAURL": "remote_ca_url", 102 | "remoteAPIURL": "remote_api_url", 103 | "remoteSATokenPath": "/path/to/token", 104 | "resyncPeriod": "10s", 105 | "servicePrefix": "cluster-A" 106 | } 107 | ] 108 | } 109 | ``` 110 | 111 | ## Generating mirrored service names 112 | 113 | In order to make regex matching easier for dns rewrite purposes (see coredns 114 | example bellow) we use a hardcoded separator between service names and 115 | namespaces on the generated name for mirrored service: `73736d`. 116 | 117 | The format of the generated name is: `--73736d-`. 118 | 119 | It's possible for this name to exceed the 63 character limit imposed by 120 | Kubernetes so the operator should have a Gatekeeper / Kyverno rule to guard 121 | against exceeding this Service name length. 
122 | 123 | ## Coredns config example 124 | 125 | To create a smoother experience when accessing a service coredns can be 126 | configured using the `rewrite` functionality: 127 | ``` 128 | cluster.example { 129 | errors 130 | health 131 | rewrite continue { 132 | name regex ([a-zA-Z0-9-_]*\.)?([a-zA-Z0-9-_]*)\.([a-zv0-9-_]*)\.svc\.cluster\.example {1}example-{3}-73736d-{2}..svc.cluster.local 133 | answer name ([a-zA-Z0-9-_]*\.)?example-([a-zA-Z0-9-_]*)-73736d-([a-zA-Z0-9-_]*)\.\.svc\.cluster\.local {1}{3}.{2}.svc.cluster.example 134 | } 135 | kubernetes cluster.local in-addr.arpa ip6.arpa { 136 | pods insecure 137 | endpoint_pod_names 138 | upstream 139 | fallthrough in-addr.arpa ip6.arpa 140 | } 141 | forward . /etc/resolv.conf 142 | cache 30 143 | loop 144 | reload 145 | loadbalance 146 | } 147 | .:53 { 148 | errors 149 | health 150 | kubernetes cluster.local in-addr.arpa ip6.arpa { 151 | pods insecure 152 | endpoint_pod_names 153 | upstream 154 | fallthrough in-addr.arpa ip6.arpa 155 | } 156 | 157 | prometheus :9153 158 | forward . /etc/resolv.conf 159 | cache 30 160 | loop 161 | reload 162 | loadbalance 163 | } 164 | ``` 165 | that way all queries for services under domain cluster.target will be rewritten 166 | to match services on the local namespace that the services are mirrored. 167 | 168 | * note that `` and `` should be replaced with a name for the 169 | target cluster and the local namespace that contains the mirrored services. 170 | * note that the above example assumes that you are running the mirroring service 171 | with a prefix flag that matches the target cluster name. 172 | 173 | ## Global Services 174 | 175 | The operator is also watching services based on a separate label, in order to 176 | create global services. A global service will gather endpoints from multiple 177 | remote clusters that live under the "same" namespace and name, into a single 178 | ocal service with endpoints in multiple clusters. 
For that purpose, it will 179 | create a single ClusterIP (or headless) service and mirror endpointslices from 180 | remote clusters to target the new "global" service. 181 | 182 | The format of the name used for the global service is: 183 | `gl--73736d-`. 184 | 185 | For example, if we have the following services: 186 | - cluster: cA, namespace: example-ns, name: my-svc, endpoints: [eA] 187 | - cluster: cB, namespace: example-ns, name: my-svc, endpoints: [eB1, eB2] 188 | The operator will create a global service under the local "semaphore" namespace 189 | with a corresponding list of endpoints: [eA, eB1, eB2]. 190 | 191 | * Global services will include endpoints from the local cluster as well, 192 | provided they are using the mirror label. 193 | * Global services will try to utilise Kubernetes topology aware hints to route 194 | to local endpoints first. 195 | 196 | ### CoreDNS config for Global services 197 | 198 | In order to be able to resolve the global services under `cluster.global` 199 | domain, the following CoreDNS block is needed: 200 | ``` 201 | cluster.global { 202 | cache 30 203 | errors 204 | forward . /etc/resolv.conf 205 | kubernetes cluster.local { 206 | pods insecure 207 | endpoint_pod_names 208 | } 209 | loadbalance 210 | loop 211 | prometheus 212 | reload 213 | rewrite continue { 214 | name regex ([\w-]*\.)?([\w-]*)\.([\w-]*)\.svc\.cluster\.global {1}gl-{3}-73736d-{2}.sys-semaphore.svc.cluster.local 215 | answer name ([\w-]*\.)?gl-([\w-]*)-73736d-([\w-]*)\.sys-semaphore\.svc\.cluster\.local {1}{3}.{2}.svc.cluster.global 216 | } 217 | } 218 | ``` 219 | 220 | ### Topology routing 221 | 222 | In some cases, it is preferable to route to endpoints which live closer to the 223 | caller when addressing global services (first hit available endpoints in the 224 | same cluster). 
For that purpose, one can use a label to instruct the controller 225 | to set `service.kubernetes.io/topology-aware-hints=auto` label in the generated 226 | global service and instruct Kubernetes to use topology hints for routing traffic 227 | to the service. In order for the hints to be effective, the operator reads the 228 | local configuration `zones` field and uses the list of zones defined there as 229 | hints for local endpoints. If this is not set, a dummy value will be used and 230 | topology aware routing will not be feasible. The operator also uses the dummy 231 | "remote" zone value as a hint for endpoits mirrored from remote clusters, to 232 | make sure that no routing decisions will be made on those and kube-proxy will 233 | not complain about missing hints. 234 | The label to enable the above is configurable via `globalSvcTopologyLabel` field 235 | in the global configuration. 236 | 237 | ### Fungible values 238 | 239 | Since service endpoints that will be involved in a global service come from 240 | multiple services in different clusters, based on the service name and 241 | namespace, certain parameters need to match across all those service 242 | definitions. In particular, service ports and topology labels values are 243 | fungible and if their values differ between definitions of services that feed 244 | endpoints to the same global service, there will be a race between services to 245 | force their attributes to the global service. For a predictable behaviour, make 246 | sure that ports match between services and either all or none set the topology 247 | label. 248 | 249 | ## Metrics 250 | 251 | There are separate metrics available that one can use to determine the status 252 | of the controller. The available metrics can give a visibility on errors from 253 | the Kubernetes clients, the watchers and the controller's queues. 
254 | 255 | ### Kubernetes Client Metrics 256 | 257 | - `semaphore_service_mirror_kube_http_request_total`: Total number of HTTP 258 | requests to the Kubernetes API by host, code and method. 259 | - `semaphore_service_mirror_kube_http_request_duration_seconds`: Histogram of 260 | latencies for HTTP requests to the Kubernetes API by host and method 261 | 262 | ### Kubernetes Watcher Metrics 263 | 264 | - `semaphore_service_mirror_kube_watcher_objects`: Number of objects watched by 265 | watcher and kind 266 | - `semaphore_service_mirror_kube_watcher_events_total`: Number of events handled 267 | by watcher, kind and event_type 268 | 269 | Because the controller runs multiple watchers in parallel, both for watching the 270 | remote clusters and the mirrored local objects, we use 2 labels to be able to 271 | distinguish between them. 272 | - `watcher` label follows the pattern `-[mirror]`. 273 | For example `watcher="aws-serviceWatcher"` will contain metrics for watching 274 | services on a cluster called "aws", and `watcher="aws-mirrorServiceWatcher"` 275 | will contain metrics for the mirrored local services from "aws" cluster. 276 | - `runner` label follows the pattern `[mirror|global]-` and should 277 | help distinguish if a watcher is used to create service mirrors or global 278 | services. 279 | Based on the above, one could use the following expression: 280 | `semaphore_service_mirror_kube_watcher_objects{watcher=~".*-mirror.*"} - ignoring(watcher) semaphore_service_mirror_kube_watcher_objects{watcher!~".*-mirror.*"}` 281 | to monitor if controllers are lagging. The `runner` label comes handy in the 282 | above query, to avoid finding duplicate series for the match group. 283 | 284 | ### Queue Metrics 285 | 286 | - `semaphore_service_mirror_queue_depth`: Workqueue depth, by queue name. 287 | - `semaphore_service_mirror_queue_adds_total`: Workqueue adds, by queue name. 
288 | - `semaphore_service_mirror_queue_latency_duration_seconds`: Workqueue latency, 289 | by queue name. 290 | - `semaphore_service_mirror_queue_work_duration_seconds`: Workqueue work 291 | duration, by queue name. 292 | - `semaphore_service_mirror_queue_unfinished_work_seconds`: Unfinished work in 293 | seconds, by queue name. 294 | - `semaphore_service_mirror_queue_longest_running_processor_seconds`: Longest 295 | running processor, by queue name. 296 | - `semaphore_service_mirror_queue_retries_total`: Workqueue retries, by queue 297 | name. 298 | - `semaphore_service_mirror_queue_requeued_items`: Items that have been requeued 299 | but not reconciled yet, by queue name. 300 | -------------------------------------------------------------------------------- /backoff/backoff.go: -------------------------------------------------------------------------------- 1 | // Package backoff includes this backoff function copied from: https://github.com/jpillora/backoff/blob/d80867952dff4e2fbfb4280ded4ff94d67790457/backoff.go 2 | package backoff 3 | 4 | import ( 5 | "math" 6 | "math/rand" 7 | "sync/atomic" 8 | "time" 9 | ) 10 | 11 | // Backoff is a time.Duration counter, starting at Min. After every call to 12 | // the Duration method the current timing is multiplied by Factor, but it 13 | // never exceeds Max. 14 | // 15 | // Backoff is not generally concurrent-safe, but the ForAttempt method can 16 | // be used concurrently. 17 | type Backoff struct { 18 | attempt uint64 19 | // Factor is the multiplying factor for each increment step 20 | Factor float64 21 | // Jitter eases contention by randomizing backoff steps 22 | Jitter bool 23 | // Min and Max are the minimum and maximum values of the counter 24 | Min, Max time.Duration 25 | } 26 | 27 | // Duration returns the duration for the current attempt before incrementing 28 | // the attempt counter. See ForAttempt. 
29 | func (b *Backoff) Duration() time.Duration { 30 | d := b.ForAttempt(float64(atomic.AddUint64(&b.attempt, 1) - 1)) 31 | return d 32 | } 33 | 34 | const maxInt64 = float64(math.MaxInt64 - 512) 35 | 36 | // ForAttempt returns the duration for a specific attempt. This is useful if 37 | // you have a large number of independent Backoffs, but don't want use 38 | // unnecessary memory storing the Backoff parameters per Backoff. The first 39 | // attempt should be 0. 40 | // 41 | // ForAttempt is concurrent-safe. 42 | func (b *Backoff) ForAttempt(attempt float64) time.Duration { 43 | // Zero-values are nonsensical, so we use 44 | // them to apply defaults 45 | min := b.Min 46 | if min <= 0 { 47 | min = 100 * time.Millisecond 48 | } 49 | max := b.Max 50 | if max <= 0 { 51 | max = 10 * time.Second 52 | } 53 | if min >= max { 54 | // short-circuit 55 | return max 56 | } 57 | factor := b.Factor 58 | if factor <= 0 { 59 | factor = 2 60 | } 61 | //calculate this duration 62 | minf := float64(min) 63 | durf := minf * math.Pow(factor, attempt) 64 | if b.Jitter { 65 | durf = rand.Float64()*(durf-minf) + minf 66 | } 67 | //ensure float64 wont overflow int64 68 | if durf > maxInt64 { 69 | return max 70 | } 71 | dur := time.Duration(durf) 72 | //keep within bounds 73 | if dur < min { 74 | return min 75 | } 76 | if dur > max { 77 | return max 78 | } 79 | return dur 80 | } 81 | 82 | // Reset restarts the current attempt counter at zero. 83 | func (b *Backoff) Reset() { 84 | atomic.StoreUint64(&b.attempt, 0) 85 | } 86 | 87 | // Attempt returns the current attempt counter value. 
88 | func (b *Backoff) Attempt() float64 { 89 | return float64(atomic.LoadUint64(&b.attempt)) 90 | } 91 | 92 | // Copy returns a backoff with equals constraints as the original 93 | func (b *Backoff) Copy() *Backoff { 94 | return &Backoff{ 95 | Factor: b.Factor, 96 | Jitter: b.Jitter, 97 | Min: b.Min, 98 | Max: b.Max, 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /backoff/retry.go: -------------------------------------------------------------------------------- 1 | package backoff 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 7 | ) 8 | 9 | type operation func() error 10 | 11 | const ( 12 | defaultBackoffJitter = true 13 | defaultBackoffMin = 2 * time.Second 14 | defaultBackoffMax = 1 * time.Minute 15 | ) 16 | 17 | // Retry will use the default backoff values to retry the passed operation 18 | func Retry(op operation, description string) { 19 | b := &Backoff{ 20 | Jitter: defaultBackoffJitter, 21 | Min: defaultBackoffMin, 22 | Max: defaultBackoffMax, 23 | } 24 | RetryWithBackoff(op, b, description) 25 | } 26 | 27 | // RetryWithBackoff will retry the passed function (operation) using the given 28 | // backoff 29 | func RetryWithBackoff(op operation, b *Backoff, description string) { 30 | b.Reset() 31 | for { 32 | err := op() 33 | if err == nil { 34 | return 35 | } 36 | d := b.Duration() 37 | log.Logger.Error("Retry failed", 38 | "description", description, 39 | "error", err, 40 | "backoff", d, 41 | ) 42 | time.Sleep(d) 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /backoff/retry_test.go: -------------------------------------------------------------------------------- 1 | package backoff 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 10 | ) 11 | 12 | var testFuncCallCounter int 13 | var 
successThreshold int 14 | 15 | func testFunc() error { 16 | testFuncCallCounter++ 17 | 18 | if testFuncCallCounter >= successThreshold { 19 | return nil 20 | } 21 | return errors.New("error") 22 | 23 | } 24 | 25 | func TestRetryWithBackoff(t *testing.T) { 26 | log.InitLogger("retry-test", "info") 27 | b := &Backoff{ 28 | Jitter: false, 29 | Min: 10 * time.Millisecond, 30 | Max: 1 * time.Second, 31 | } 32 | successThreshold = 3 33 | 34 | // Retrying testFunc should fail 2 times before hitting the success 35 | // threshold 36 | RetryWithBackoff(testFunc, b, "test func") 37 | assert.Equal(t, testFuncCallCounter, 3) // should be 3 after 2 consecutive fails 38 | assert.Equal(t, b.Duration(), 40*time.Millisecond) // should be 40 millisec after failing for 10 and 20 and without a jitter 39 | } 40 | -------------------------------------------------------------------------------- /config.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | ) 8 | 9 | const ( 10 | defaultWGDeviceMTU = 1420 11 | defaultWGListenPort = 51820 12 | ) 13 | 14 | // Duration is a helper to unmarshal time.Duration from json 15 | // https://stackoverflow.com/questions/48050945/how-to-unmarshal-json-into-durations/54571600#54571600 16 | type Duration struct { 17 | time.Duration 18 | } 19 | 20 | // MarshalJSON calls json Marshall on Duration 21 | func (d Duration) MarshalJSON() ([]byte, error) { 22 | return json.Marshal(d.String()) 23 | } 24 | 25 | // UnmarshalJSON provides handling of time.Duration when unmarshalling 26 | func (d *Duration) UnmarshalJSON(b []byte) error { 27 | var v interface{} 28 | if err := json.Unmarshal(b, &v); err != nil { 29 | return err 30 | } 31 | switch value := v.(type) { 32 | case float64: 33 | d.Duration = time.Duration(value) 34 | return nil 35 | case string: 36 | tmp, err := time.ParseDuration(value) 37 | if err != nil { 38 | return err 39 | } 40 | d.Duration = 
tmp 41 | return nil 42 | default: 43 | return fmt.Errorf("Invalid duration of type %v", value) 44 | } 45 | } 46 | 47 | // globalConfig will keep configuration that applies globally on the operator 48 | type globalConfig struct { 49 | GlobalSvcLabelSelector string `json:"globalSvcLabelSelector"` // Label used to select global services to mirror 50 | GlobalSvcRoutingStrategyLabel string `json:"globalSvcRoutingStrategyLabel"` // Label used to enable topology aware hints for global services 51 | MirrorSvcLabelSelector string `json:"mirrorSvcLabelSelector"` // Label used to select remote services to mirror 52 | MirrorNamespace string `json:"mirrorNamespace"` // Local namespace to mirror remote services 53 | ServiceSync bool `json:"serviceSync"` // sync services on startup 54 | EndpointSliceSync bool `json:"endpointSliceSync"` // sync endpointslices (for global services) at startup 55 | } 56 | 57 | type localClusterConfig struct { 58 | Name string `json:"name"` 59 | KubeConfigPath string `json:"kubeConfigPath"` 60 | Zones []string `json:"zones"` 61 | } 62 | 63 | type remoteClusterConfig struct { 64 | Name string `json:"name"` 65 | KubeConfigPath string `json:"kubeConfigPath"` 66 | RemoteAPIURL string `json:"remoteAPIURL"` 67 | RemoteCAURL string `json:"remoteCAURL"` 68 | RemoteSATokenPath string `json:"remoteSATokenPath"` 69 | ResyncPeriod Duration `json:"resyncPeriod"` 70 | ServicePrefix string `json:"servicePrefix"` // How to prefix services mirrored from this cluster locally 71 | } 72 | 73 | // Config holds the application configuration 74 | type Config struct { 75 | Global globalConfig `json:"global"` 76 | LocalCluster localClusterConfig `json:"localCluster"` 77 | RemoteClusters []*remoteClusterConfig `json:"remoteClusters"` 78 | } 79 | 80 | func parseConfig(rawConfig []byte, flagGlobalSvcLabelSelector, flagGlobalSvcRoutingStrategyLabel, flagMirrorSvcLabelSelector, flagMirrorNamespace string) (*Config, error) { 81 | conf := &Config{} 82 | if err := 
json.Unmarshal(rawConfig, conf); err != nil { 83 | return nil, fmt.Errorf("error unmarshalling config: %v", err) 84 | } 85 | // Override global config via flags/env vars and check 86 | if flagMirrorSvcLabelSelector != "" { 87 | conf.Global.MirrorSvcLabelSelector = flagMirrorSvcLabelSelector 88 | } 89 | if conf.Global.MirrorSvcLabelSelector == "" { 90 | return nil, fmt.Errorf("Label selector for service mirroring should be specified either via global json config, env vars or flag") 91 | } 92 | if flagGlobalSvcLabelSelector != "" { 93 | conf.Global.GlobalSvcLabelSelector = flagGlobalSvcLabelSelector 94 | } 95 | if conf.Global.GlobalSvcLabelSelector == "" { 96 | return nil, fmt.Errorf("Label selector for global services should be specified either via global json config, env vars or flag") 97 | } 98 | if flagGlobalSvcRoutingStrategyLabel != "" { 99 | conf.Global.GlobalSvcRoutingStrategyLabel = flagGlobalSvcRoutingStrategyLabel 100 | } 101 | if conf.Global.GlobalSvcRoutingStrategyLabel == "" { 102 | return nil, fmt.Errorf("Label to enable topology aware hints for global services should be specified either via global json config, env vars or flag") 103 | } 104 | if flagMirrorNamespace != "" { 105 | conf.Global.MirrorNamespace = flagMirrorNamespace 106 | } 107 | if conf.Global.MirrorNamespace == "" { 108 | return nil, fmt.Errorf("Local mirroring namespace should be specified either via global json config, env vars or flag") 109 | } 110 | if conf.LocalCluster.Name == "" { 111 | return nil, fmt.Errorf("Configuration is missing local cluster name") 112 | } 113 | // If local cluster zones are not set, default to a dummy value, so that kube-proxy does not complain 114 | if len(conf.LocalCluster.Zones) == 0 { 115 | conf.LocalCluster.Zones = []string{"local"} 116 | } 117 | 118 | // Check for mandatory remote config. 
119 | if len(conf.RemoteClusters) < 1 { 120 | return nil, fmt.Errorf("No remote cluster configuration defined") 121 | } 122 | for _, r := range conf.RemoteClusters { 123 | if r.Name == "" { 124 | return nil, fmt.Errorf("Configuration is missing remote cluster name") 125 | } 126 | if (r.RemoteAPIURL == "" || r.RemoteCAURL == "" || r.RemoteSATokenPath == "") && r.KubeConfigPath == "" { 127 | return nil, fmt.Errorf("Insufficient configuration to create remote cluster client. Set kubeConfigPath or remoteAPIURL and remoteCAURL and remoteSATokenPath") 128 | } 129 | if r.ServicePrefix == "" { 130 | return nil, fmt.Errorf("Configuration is missing a service prefix for services mirrored from the remote") 131 | } 132 | } 133 | return conf, nil 134 | } 135 | -------------------------------------------------------------------------------- /config_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | "github.com/stretchr/testify/assert" 9 | ) 10 | 11 | var ( 12 | testFlagGlobalSvcLabelSelector = "flag-global-label" 13 | testFlagGlobalSvcTopologyLabel = "flag-global-topology-label" 14 | testFlagMirrorSvcLabelSelector = "flag-mirror-label" 15 | testFlagMirrorNamespace = "flag-namespace" 16 | ) 17 | 18 | func TestConfig(t *testing.T) { 19 | emptyConfig := []byte(` 20 | { 21 | "global": {} 22 | } 23 | `) 24 | _, err := parseConfig(emptyConfig, testFlagGlobalSvcLabelSelector, testFlagGlobalSvcTopologyLabel, testFlagMirrorSvcLabelSelector, testFlagMirrorNamespace) 25 | assert.Equal(t, fmt.Errorf("Configuration is missing local cluster name"), err) 26 | 27 | globalConfigOnly := []byte(` 28 | { 29 | "global": {}, 30 | "localCluster":{ 31 | "name": "local_cluster" 32 | } 33 | } 34 | `) 35 | _, err = parseConfig(globalConfigOnly, testFlagGlobalSvcLabelSelector, testFlagGlobalSvcTopologyLabel, testFlagMirrorSvcLabelSelector, testFlagMirrorNamespace) 36 | assert.Equal(t, 
fmt.Errorf("No remote cluster configuration defined"), err) 37 | 38 | emptyRemoteConfigName := []byte(` 39 | { 40 | "localCluster":{ 41 | "name": "local_cluster" 42 | }, 43 | "remoteClusters": [ 44 | { 45 | "name": "" 46 | } 47 | ] 48 | } 49 | `) 50 | _, err = parseConfig(emptyRemoteConfigName, testFlagGlobalSvcLabelSelector, testFlagGlobalSvcTopologyLabel, testFlagMirrorSvcLabelSelector, testFlagMirrorNamespace) 51 | assert.Equal(t, fmt.Errorf("Configuration is missing remote cluster name"), err) 52 | insufficientRemoteKubeConfigPath := []byte(` 53 | { 54 | "localCluster": { 55 | "name": "local_cluster", 56 | "kubeConfigPath": "/path/to/kube/config" 57 | }, 58 | "remoteClusters": [ 59 | { 60 | "name": "remote_cluster_1", 61 | "remoteCAURL": "remote_ca_url", 62 | "remoteAPIURL": "remote_api_url" 63 | } 64 | ] 65 | } 66 | `) 67 | _, err = parseConfig(insufficientRemoteKubeConfigPath, testFlagGlobalSvcLabelSelector, testFlagGlobalSvcTopologyLabel, testFlagMirrorSvcLabelSelector, testFlagMirrorNamespace) 68 | assert.Equal(t, fmt.Errorf("Insufficient configuration to create remote cluster client. 
Set kubeConfigPath or remoteAPIURL and remoteCAURL and remoteSATokenPath"), err) 69 | 70 | rawFullConfig := []byte(` 71 | { 72 | "global": { 73 | "globalSvcLabelSelector": "globalLabel", 74 | "globalSvcRoutingStrategyLabel": "globalTopologyLabel", 75 | "mirrorSvcLabelSelector": "mirrorLabel", 76 | "mirrorNamespace": "sys-semaphore", 77 | "serviceSync": true 78 | }, 79 | "localCluster": { 80 | "name": "local_cluster", 81 | "kubeConfigPath": "/path/to/kube/config" 82 | }, 83 | "remoteClusters": [ 84 | { 85 | "name": "remote_cluster_1", 86 | "remoteCAURL": "remote_ca_url", 87 | "remoteAPIURL": "remote_api_url", 88 | "remoteSATokenPath": "/path/to/token", 89 | "resyncPeriod": "10s", 90 | "servicePrefix": "cluster-1" 91 | }, 92 | { 93 | "name": "remote_cluster_2", 94 | "kubeConfigPath": "/path/to/kube/config", 95 | "servicePrefix": "cluster-2" 96 | } 97 | ] 98 | } 99 | `) 100 | config, err := parseConfig(rawFullConfig, "", "", "", "") 101 | assert.Equal(t, nil, err) 102 | assert.Equal(t, "globalLabel", config.Global.GlobalSvcLabelSelector) 103 | assert.Equal(t, "globalTopologyLabel", config.Global.GlobalSvcRoutingStrategyLabel) 104 | assert.Equal(t, "mirrorLabel", config.Global.MirrorSvcLabelSelector) 105 | assert.Equal(t, "sys-semaphore", config.Global.MirrorNamespace) 106 | assert.Equal(t, true, config.Global.ServiceSync) 107 | assert.Equal(t, "local_cluster", config.LocalCluster.Name) 108 | assert.Equal(t, "/path/to/kube/config", config.LocalCluster.KubeConfigPath) 109 | assert.Equal(t, 2, len(config.RemoteClusters)) 110 | assert.Equal(t, "remote_ca_url", config.RemoteClusters[0].RemoteCAURL) 111 | assert.Equal(t, "remote_api_url", config.RemoteClusters[0].RemoteAPIURL) 112 | assert.Equal(t, "/path/to/token", config.RemoteClusters[0].RemoteSATokenPath) 113 | assert.Equal(t, "", config.RemoteClusters[0].KubeConfigPath) 114 | assert.Equal(t, Duration{10 * time.Second}, config.RemoteClusters[0].ResyncPeriod) 115 | assert.Equal(t, "cluster-1", 
config.RemoteClusters[0].ServicePrefix) 116 | assert.Equal(t, "remote_cluster_2", config.RemoteClusters[1].Name) 117 | assert.Equal(t, "", config.RemoteClusters[1].RemoteCAURL) 118 | assert.Equal(t, "", config.RemoteClusters[1].RemoteAPIURL) 119 | assert.Equal(t, "", config.RemoteClusters[1].RemoteSATokenPath) 120 | assert.Equal(t, "/path/to/kube/config", config.RemoteClusters[1].KubeConfigPath) 121 | assert.Equal(t, Duration{0}, config.RemoteClusters[1].ResyncPeriod) 122 | assert.Equal(t, "cluster-2", config.RemoteClusters[1].ServicePrefix) 123 | 124 | } 125 | -------------------------------------------------------------------------------- /deploy/example/README.md: -------------------------------------------------------------------------------- 1 | Example deployment using kustomize base. 2 | 3 | The example under this directory deploys: 4 | - A service account under `kube-system` namespace, which should be used from 5 | remote service-mirror controllers to gain access to local cluster resources. 6 | The user will need to grab the token generated for the service account and 7 | feed it as a secret to remote deployments. 8 | - The controller deployment under any namespace for a single controller watching 9 | a single remote cluster for mirrors. 
10 | -------------------------------------------------------------------------------- /deploy/example/kube-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../kustomize/cluster/ 5 | -------------------------------------------------------------------------------- /deploy/example/namespaced/deployment-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: semaphore-service-mirror 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: semaphore-service-mirror 10 | volumeMounts: 11 | - name: token 12 | mountPath: /etc/semaphore-service-mirror/tokens/c1 13 | readOnly: true 14 | volumes: 15 | - name: token 16 | secret: 17 | secretName: remote-kube-token 18 | - name: config 19 | configMap: 20 | name: semaphore-service-mirror-config 21 | -------------------------------------------------------------------------------- /deploy/example/namespaced/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - ../../kustomize/namespaced/ 5 | patchesStrategicMerge: 6 | - deployment-patch.yaml 7 | configMapGenerator: 8 | - name: semaphore-service-mirror-config 9 | files: 10 | - config.json=resources/semaphore-service-mirror-config.json 11 | secretGenerator: 12 | - name: remote-kube-token 13 | type: Opaque 14 | files: 15 | - token=secrets/remote-kube-token 16 | -------------------------------------------------------------------------------- /deploy/example/namespaced/resources/semaphore-service-mirror-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "global": { 3 | "globalSvcLabelSelector": 
"mirror.semaphore.uw.io/global-service=true", 4 | "globalSvcRoutingStrategyLabel": "mirror.semaphore.uw.io/global-service-routing-strategy=local-first", 5 | "mirrorSvcLabelSelector": "uw.systems/mirror=true", 6 | "mirrorNamespace": "namespaced", 7 | "serviceSync": true, 8 | "endpointSliceSync": true 9 | }, 10 | "localCluster": { 11 | "name": "local", 12 | "zones": [ 13 | "europe-west2-a", 14 | "europe-west2-b", 15 | "europe-west2-c" 16 | ] 17 | }, 18 | "remoteClusters": [ 19 | { 20 | "name": "c1", 21 | "remoteAPIURL": "https://elb.master.k8s.exp-1.c1.uw.systems", 22 | "remoteCAURL": "https://kube-ca-cert.exp-1.c1.uw.systems", 23 | "remoteSATokenPath": "/etc/semaphore-service-mirror/tokens/c1/token", 24 | "servicePrefix": "c1" 25 | } 26 | ] 27 | } 28 | -------------------------------------------------------------------------------- /deploy/example/namespaced/secrets/remote-kube-token: -------------------------------------------------------------------------------- 1 | # kubectl --namespace=kube-system get secret semaphore-service-mirror-remote-token-xxxx -o json | jq -r .data.token | base64 -d 2 | token 3 | -------------------------------------------------------------------------------- /deploy/kustomize/cluster/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - remote-auth.yaml 5 | -------------------------------------------------------------------------------- /deploy/kustomize/cluster/remote-auth.yaml: -------------------------------------------------------------------------------- 1 | # Cluster role and serviceaccount to be used by remote instances of 2 | # semaphore-service-mirror in order to be able to mirror from this cluster 3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | metadata: 6 | name: semaphore-service-mirror-remote 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 | - services 11 | - 
endpoints 12 | verbs: 13 | - get 14 | - list 15 | - watch 16 | - apiGroups: ["discovery.k8s.io"] 17 | resources: 18 | - endpointslices 19 | verbs: 20 | - get 21 | - list 22 | - watch 23 | --- 24 | kind: ClusterRoleBinding 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | metadata: 27 | name: semaphore-service-mirror-remote 28 | subjects: 29 | - kind: ServiceAccount 30 | name: semaphore-service-mirror-remote 31 | namespace: kube-system 32 | roleRef: 33 | kind: ClusterRole 34 | name: semaphore-service-mirror-remote 35 | apiGroup: rbac.authorization.k8s.io 36 | --- 37 | apiVersion: v1 38 | kind: ServiceAccount 39 | metadata: 40 | name: semaphore-service-mirror-remote 41 | namespace: kube-system 42 | --- 43 | apiVersion: v1 44 | kind: Secret 45 | metadata: 46 | name: semaphore-service-mirror-remote-token 47 | annotations: 48 | kubernetes.io/service-account.name: semaphore-service-mirror-remote 49 | type: kubernetes.io/service-account-token 50 | -------------------------------------------------------------------------------- /deploy/kustomize/namespaced/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: semaphore-service-mirror 5 | labels: 6 | app: semaphore-service-mirror 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: semaphore-service-mirror 12 | template: 13 | metadata: 14 | labels: 15 | app: semaphore-service-mirror 16 | annotations: 17 | prometheus.io/scrape: "true" 18 | prometheus.io/path: /metrics 19 | prometheus.io/port: "8080" 20 | spec: 21 | serviceAccountName: semaphore-service-mirror 22 | containers: 23 | - name: semaphore-service-mirror 24 | image: semaphore-service-mirror 25 | env: 26 | - name: SSM_CONFIG 27 | value: "/etc/semaphore-service-mirror/config.json" 28 | volumeMounts: 29 | - name: config 30 | mountPath: /etc/semaphore-service-mirror/ 31 | ports: 32 | - name: http 33 | containerPort: 8080 34 | livenessProbe: 35 
| httpGet: 36 | path: /healthz 37 | port: http 38 | periodSeconds: 10 39 | failureThreshold: 6 40 | initialDelaySeconds: 30 41 | successThreshold: 1 42 | timeoutSeconds: 1 43 | volumes: 44 | - name: config 45 | configMap: 46 | name: semaphore-service-mirror-config 47 | -------------------------------------------------------------------------------- /deploy/kustomize/namespaced/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - deployment.yaml 5 | - rbac.yaml 6 | images: 7 | - name: semaphore-service-mirror 8 | newName: quay.io/utilitywarehouse/semaphore-service-mirror 9 | newTag: master 10 | -------------------------------------------------------------------------------- /deploy/kustomize/namespaced/rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: semaphore-service-mirror 5 | rules: 6 | - apiGroups: [""] 7 | resources: 8 | - services 9 | - endpoints 10 | verbs: 11 | - get 12 | - list 13 | - watch 14 | - create 15 | - update 16 | - delete 17 | - apiGroups: ["discovery.k8s.io"] 18 | resources: 19 | - endpointslices 20 | verbs: 21 | - get 22 | - list 23 | - watch 24 | - create 25 | - update 26 | - delete 27 | --- 28 | kind: RoleBinding 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | metadata: 31 | name: semaphore-service-mirror 32 | subjects: 33 | - kind: ServiceAccount 34 | name: semaphore-service-mirror 35 | roleRef: 36 | kind: Role 37 | name: semaphore-service-mirror 38 | apiGroup: rbac.authorization.k8s.io 39 | --- 40 | apiVersion: v1 41 | kind: ServiceAccount 42 | metadata: 43 | name: semaphore-service-mirror 44 | -------------------------------------------------------------------------------- /global_runner.go: -------------------------------------------------------------------------------- 1 | 
package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | v1 "k8s.io/api/core/v1" 9 | discoveryv1 "k8s.io/api/discovery/v1" 10 | "k8s.io/apimachinery/pkg/api/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/labels" 13 | "k8s.io/apimachinery/pkg/watch" 14 | "k8s.io/client-go/kubernetes" 15 | "k8s.io/client-go/tools/cache" 16 | 17 | "github.com/utilitywarehouse/semaphore-service-mirror/kube" 18 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 19 | ) 20 | 21 | // GlobalRunner watches a cluster for global services and mirrors the found 22 | // configuration under a local namespace 23 | type GlobalRunner struct { 24 | ctx context.Context 25 | client kubernetes.Interface 26 | globalServiceStore *GlobalServiceStore 27 | serviceQueue *queue 28 | serviceWatcher *kube.ServiceWatcher 29 | endpointSliceQueue *queue 30 | endpointSliceWatcher *kube.EndpointSliceWatcher 31 | mirrorEndpointSliceWatcher *kube.EndpointSliceWatcher 32 | name string 33 | namespace string 34 | labelselector string 35 | sync bool 36 | syncMirrorLabels map[string]string // Labels used to watch mirrore endpointslices and delete stale objects on startup 37 | initialised bool // Flag to turn on after the successful initialisation of the runner. 
38 | local bool // Flag to identify if the runner is running against a local or remote cluster 39 | routingStrategyLabel labels.Selector // Label to identify services that want to utilise topology hints 40 | } 41 | 42 | func newGlobalRunner(client, watchClient kubernetes.Interface, name, namespace, labelselector string, resyncPeriod time.Duration, gst *GlobalServiceStore, local bool, rsl labels.Selector, sync bool) *GlobalRunner { 43 | mirrorLabels := map[string]string{ 44 | "mirrored-endpoint-slice": "true", 45 | "mirror-endpointslice-sync-name": name, 46 | } 47 | runner := &GlobalRunner{ 48 | ctx: context.Background(), 49 | client: client, 50 | name: name, 51 | namespace: namespace, 52 | globalServiceStore: gst, 53 | initialised: false, 54 | local: local, 55 | routingStrategyLabel: rsl, 56 | sync: sync, 57 | syncMirrorLabels: mirrorLabels, 58 | } 59 | runner.serviceQueue = newQueue(fmt.Sprintf("%s-global-service", name), runner.reconcileGlobalService) 60 | runner.endpointSliceQueue = newQueue(fmt.Sprintf("%s-endpointslice", name), runner.reconcileEndpointSlice) 61 | runnerName := fmt.Sprintf("global-%s", name) 62 | 63 | // Create and initialize a service watcher 64 | serviceWatcher := kube.NewServiceWatcher( 65 | fmt.Sprintf("%s-serviceWatcher", name), 66 | watchClient, 67 | resyncPeriod, 68 | runner.ServiceEventHandler, 69 | labelselector, 70 | metav1.NamespaceAll, 71 | runnerName, 72 | ) 73 | runner.serviceWatcher = serviceWatcher 74 | runner.serviceWatcher.Init() 75 | 76 | // Create and initialize an endpointslice watcher 77 | endpointSliceWatcher := kube.NewEndpointSliceWatcher( 78 | fmt.Sprintf("%s-endpointSliceWatcher", name), 79 | watchClient, 80 | resyncPeriod, 81 | runner.EndpointSliceEventHandler, 82 | labelselector, 83 | metav1.NamespaceAll, 84 | runnerName, 85 | ) 86 | runner.endpointSliceWatcher = endpointSliceWatcher 87 | runner.endpointSliceWatcher.Init() 88 | 89 | // Create and initialize an endpointslice watcher for mirrored endpointslices 90 | 
mirrorEndpointSliceWatcher := kube.NewEndpointSliceWatcher( 91 | fmt.Sprintf("%s-mirrorEndpointSliceWatcher", name), 92 | watchClient, 93 | resyncPeriod, 94 | nil, 95 | labels.Set(mirrorLabels).String(), 96 | namespace, 97 | runnerName, 98 | ) 99 | runner.mirrorEndpointSliceWatcher = mirrorEndpointSliceWatcher 100 | runner.mirrorEndpointSliceWatcher.Init() 101 | 102 | return runner 103 | } 104 | 105 | // Run starts the watchers and queues of the runner 106 | func (gr *GlobalRunner) Run() error { 107 | go gr.serviceWatcher.Run() 108 | // At this point the runner should be considered initialised and live. 109 | gr.initialised = true 110 | stopCh := make(chan struct{}) 111 | if ok := cache.WaitForNamedCacheSync("serviceWatcher", stopCh, gr.serviceWatcher.HasSynced); !ok { 112 | return fmt.Errorf("failed to wait for service caches to sync") 113 | } 114 | 115 | go gr.endpointSliceWatcher.Run() 116 | go gr.mirrorEndpointSliceWatcher.Run() 117 | // We need to wait fot endpoinslices watchers to sync before we sync 118 | if ok := cache.WaitForNamedCacheSync(fmt.Sprintf("gl-%s-endpointSliceWatcher", gr.name), stopCh, gr.endpointSliceWatcher.HasSynced); !ok { 119 | return fmt.Errorf("failed to wait for endpintslices caches to sync") 120 | } 121 | if ok := cache.WaitForNamedCacheSync(fmt.Sprintf("mirror-%s-endpointSliceWatcher", gr.name), stopCh, gr.mirrorEndpointSliceWatcher.HasSynced); !ok { 122 | return fmt.Errorf("failed to wait for mirror endpintslices caches to sync") 123 | } 124 | // After endpointslice store syncs, perform a sync to delete stale mirrors 125 | if gr.sync { 126 | log.Logger.Info("Syncing endpointslices", "runner", gr.name) 127 | if err := gr.EndpointSliceSync(); err != nil { 128 | log.Logger.Warn( 129 | "Error syncing endpointslices, skipping..", 130 | "err", err, 131 | "runner", gr.name, 132 | ) 133 | } 134 | } 135 | 136 | go gr.serviceQueue.Run() 137 | go gr.endpointSliceQueue.Run() 138 | 139 | return nil 140 | } 141 | 142 | // Stop stops watchers and 
runners 143 | func (gr *GlobalRunner) Stop() { 144 | gr.serviceQueue.Stop() 145 | gr.serviceWatcher.Stop() 146 | gr.endpointSliceQueue.Stop() 147 | gr.endpointSliceWatcher.Stop() 148 | } 149 | 150 | // Initialised returns true when the runner is successfully initialised 151 | func (gr *GlobalRunner) Initialised() bool { 152 | return gr.initialised 153 | } 154 | 155 | func (gr *GlobalRunner) reconcileGlobalService(name, namespace string) error { 156 | globalSvcName := generateGlobalServiceName(name, namespace) 157 | // Get the remote service 158 | log.Logger.Info("getting remote service", "namespace", namespace, "name", name, "runner", gr.name) 159 | remoteSvc, err := gr.getRemoteService(name, namespace) 160 | if errors.IsNotFound(err) { 161 | // If the remote service doesn't exist delete the cluster for 162 | // the service in the globalServiceStore 163 | log.Logger.Debug("deleting from global store", "namespace", namespace, "name", name, "runner", gr.name) 164 | gsvc := gr.globalServiceStore.DeleteClusterServiceTarget(name, namespace, gr.name) 165 | // If the returned global service is nil, then we should try to 166 | // delete the local service. If the service is already deleted 167 | // continue 168 | if gsvc == nil { 169 | log.Logger.Info("global service not found, deleting local service", "namespace", gr.namespace, "name", globalSvcName, "runner", gr.name) 170 | if err := kube.DeleteService(gr.ctx, gr.client, globalSvcName, gr.namespace); err != nil && !errors.IsNotFound(err) { 171 | return fmt.Errorf("deleting service %s/%s: %v", gr.namespace, globalSvcName, err) 172 | } 173 | return nil // return on successful service deletion, nothing else to do here. 
174 | } 175 | } else if err != nil { 176 | return fmt.Errorf("getting remote service: %v", err) 177 | } 178 | // If the remote service wasn't deleted, try to add it to the store 179 | if remoteSvc != nil { 180 | setServiceTopologyHints := matchSelector(gr.routingStrategyLabel, remoteSvc) 181 | _, err := gr.globalServiceStore.AddOrUpdateClusterServiceTarget(remoteSvc, gr.name, setServiceTopologyHints) 182 | if err != nil { 183 | return fmt.Errorf("failed to create/update service: %v", err) 184 | } 185 | } 186 | gsvc, err := gr.globalServiceStore.Get(name, namespace) 187 | if err != nil { 188 | return fmt.Errorf("finding global service in the store: %v", err) 189 | } 190 | log.Logger.Debug("global service found", "name", gsvc.name, "runner", gr.name) 191 | // If the global service doesn't exist, create it. Otherwise, update it. 192 | globalSvc, err := kube.GetService(gr.ctx, gr.client, globalSvcName, gr.namespace) 193 | if errors.IsNotFound(err) { 194 | log.Logger.Info("local service not found, creating service", "namespace", gr.namespace, "name", gsvc.name, "runner", gr.name) 195 | if _, err := kube.CreateService(gr.ctx, gr.client, globalSvcName, gr.namespace, gsvc.labels, gsvc.annotations, remoteSvc.Spec.Ports, gsvc.headless); err != nil { 196 | return fmt.Errorf("creating service %s/%s: %v", gr.namespace, globalSvcName, err) 197 | } 198 | } else if err != nil { 199 | return fmt.Errorf("getting service %s/%s: %v", gr.namespace, globalSvcName, err) 200 | } else { 201 | log.Logger.Info("local service found, updating service", "namespace", gr.namespace, "name", gsvc.name, "runner", gr.name) 202 | if _, err := gr.updateGlobalService(globalSvc, gsvc.ports, gsvc.annotations); err != nil { 203 | return fmt.Errorf("updating service %s/%s: %v", gr.namespace, globalSvcName, err) 204 | } 205 | } 206 | return nil 207 | } 208 | 209 | func (gr *GlobalRunner) getRemoteService(name, namespace string) (*v1.Service, error) { 210 | return gr.serviceWatcher.Get(name, namespace) 211 | 
} 212 | 213 | // updateGlobalService is UpdateService that will also update the annotations to reflect clusters 214 | func (gr *GlobalRunner) updateGlobalService(service *v1.Service, ports []v1.ServicePort, annotations map[string]string) (*v1.Service, error) { 215 | service.ObjectMeta.Annotations = annotations 216 | return kube.UpdateService(gr.ctx, gr.client, service, ports) 217 | } 218 | 219 | // ServiceEventHandler adds Service resource events to the respective queue 220 | func (gr *GlobalRunner) ServiceEventHandler(eventType watch.EventType, old *v1.Service, new *v1.Service) { 221 | switch eventType { 222 | case watch.Added: 223 | log.Logger.Debug("service added", "namespace", new.Namespace, "name", new.Name, "runner", gr.name) 224 | gr.serviceQueue.Add(new) 225 | case watch.Modified: 226 | log.Logger.Debug("service modified", "namespace", new.Namespace, "name", new.Name, "runner", gr.name) 227 | gr.serviceQueue.Add(new) 228 | case watch.Deleted: 229 | log.Logger.Debug("service deleted", "namespace", old.Namespace, "name", old.Name, "runner", gr.name) 230 | gr.serviceQueue.Add(old) 231 | default: 232 | log.Logger.Info("Unknown service event received: %v", eventType, "runner", gr.name) 233 | } 234 | } 235 | 236 | func (gr *GlobalRunner) getRemoteEndpointSlice(name, namespace string) (*discoveryv1.EndpointSlice, error) { 237 | return gr.endpointSliceWatcher.Get(name, namespace) 238 | } 239 | 240 | // EndpointSliceSync checks for stale mirrors (endpointslices) under the local 241 | // namespace and deletes them 242 | func (gr *GlobalRunner) EndpointSliceSync() error { 243 | storeEnpointSlices, err := gr.endpointSliceWatcher.List() 244 | if err != nil { 245 | return err 246 | } 247 | 248 | mirrorEndpointSliceList := []string{} 249 | for _, es := range storeEnpointSlices { 250 | mirrorEndpointSliceList = append( 251 | mirrorEndpointSliceList, 252 | generateGlobalEndpointSliceName(es.Name), 253 | ) 254 | } 255 | 256 | currEndpointSlices, err := 
gr.mirrorEndpointSliceWatcher.List() 257 | if err != nil { 258 | return err 259 | } 260 | 261 | for _, es := range currEndpointSlices { 262 | _, inSlice := inSlice(mirrorEndpointSliceList, es.Name) 263 | if !inSlice { 264 | log.Logger.Info( 265 | "Deleting old endpointslice", 266 | "service", es.Name, 267 | "runner", gr.name, 268 | ) 269 | if err := gr.deleteEndpointSlice(es.Name, es.Namespace); err != nil { 270 | log.Logger.Error( 271 | "Error clearing endpointslice", 272 | "endpointslice", es.Name, 273 | "err", err, 274 | "runner", gr.name, 275 | ) 276 | return err 277 | } 278 | } 279 | } 280 | return nil 281 | } 282 | 283 | func (gr *GlobalRunner) getEndpointSlice(name, namespace string) (*discoveryv1.EndpointSlice, error) { 284 | return gr.client.DiscoveryV1().EndpointSlices(namespace).Get( 285 | gr.ctx, 286 | name, 287 | metav1.GetOptions{}, 288 | ) 289 | } 290 | 291 | // kube-proxy needs all Endpoints to have hints in order to allow topology aware routing. 292 | func (gr *GlobalRunner) ensureEndpointSliceZones(endpoints []discoveryv1.Endpoint) []discoveryv1.Endpoint { 293 | var es []discoveryv1.Endpoint 294 | // For endpoints in remote clusters use a dummy zone and hint that will never be picker by kube-proxy 295 | if !gr.local { 296 | zone := "remote" 297 | for _, e := range endpoints { 298 | e.Zone = &zone 299 | e.Hints = &discoveryv1.EndpointHints{ 300 | ForZones: []discoveryv1.ForZone{ 301 | discoveryv1.ForZone{Name: "remote"}}, 302 | } 303 | es = append(es, e) 304 | } 305 | return es 306 | } 307 | // For local endpoints allow all zones as set in config 308 | for _, e := range endpoints { 309 | e.Hints = &discoveryv1.EndpointHints{ 310 | ForZones: DefaultLocalEndpointZones, 311 | } 312 | es = append(es, e) 313 | } 314 | return es 315 | } 316 | 317 | func (gr *GlobalRunner) createEndpointSlice(name, namespace, targetService string, at discoveryv1.AddressType, endpoints []discoveryv1.Endpoint, ports []discoveryv1.EndpointPort) (*discoveryv1.EndpointSlice, 
error) {
	return gr.client.DiscoveryV1().EndpointSlices(namespace).Create(
		gr.ctx,
		&discoveryv1.EndpointSlice{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: namespace,
				Labels:    generateEndpointSliceLabels(gr.syncMirrorLabels, targetService),
			},
			AddressType: at,
			Endpoints:   gr.ensureEndpointSliceZones(endpoints),
			Ports:       ports,
		},
		metav1.CreateOptions{},
	)
}

// updateEndpointSlice overwrites the mirror EndpointSlice with the given
// endpoints and ports, regenerating labels and zone hints.
func (gr *GlobalRunner) updateEndpointSlice(name, namespace, targetService string, at discoveryv1.AddressType, endpoints []discoveryv1.Endpoint, ports []discoveryv1.EndpointPort) (*discoveryv1.EndpointSlice, error) {
	return gr.client.DiscoveryV1().EndpointSlices(namespace).Update(
		gr.ctx,
		&discoveryv1.EndpointSlice{
			ObjectMeta: metav1.ObjectMeta{
				Name:      name,
				Namespace: namespace,
				Labels:    generateEndpointSliceLabels(gr.syncMirrorLabels, targetService),
			},
			AddressType: at,
			Endpoints:   gr.ensureEndpointSliceZones(endpoints),
			Ports:       ports,
		},
		metav1.UpdateOptions{},
	)
}

// deleteEndpointSlice removes an EndpointSlice from the local cluster.
func (gr *GlobalRunner) deleteEndpointSlice(name, namespace string) error {
	return gr.client.DiscoveryV1().EndpointSlices(namespace).Delete(
		gr.ctx,
		name,
		metav1.DeleteOptions{},
	)
}

// reconcileEndpointSlice brings the local mirror of a remote EndpointSlice in
// sync with its source: the mirror is deleted when the source is gone,
// created when missing, and updated otherwise. Errors are wrapped with %w so
// callers can inspect them with errors.Is/As.
func (gr *GlobalRunner) reconcileEndpointSlice(name, namespace string) error {
	mirrorName := generateGlobalEndpointSliceName(name)
	// Get the remote endpointslice
	log.Logger.Info("getting remote endpointslice", "namespace", namespace, "name", name, "runner", gr.name)
	remoteEndpointSlice, err := gr.getRemoteEndpointSlice(name, namespace)
	if errors.IsNotFound(err) {
		log.Logger.Info("remote endpointslice not found, removing local mirror", "namespace", namespace, "name", name, "runner", gr.name)
		if err := gr.deleteEndpointSlice(mirrorName, gr.namespace); err != nil && !errors.IsNotFound(err) {
			return fmt.Errorf("deleting endpointslice %s/%s: %w", gr.namespace, mirrorName, err)
		}
		return nil
	} else if err != nil {
		return fmt.Errorf("getting remote endpointslice %s/%s: %w", namespace, name, err)
	}
	// Determine the local service to target
	targetSvc, ok := remoteEndpointSlice.Labels["kubernetes.io/service-name"]
	if !ok {
		return fmt.Errorf("remote endpointslice is missing kubernetes.io/service-name label")
	}
	targetGlobalService := generateGlobalServiceName(targetSvc, namespace)
	// If the mirror endpointslice doesn't exist, create it. Otherwise, update it.
	log.Logger.Info("getting local endpointslice", "namespace", gr.namespace, "name", mirrorName, "runner", gr.name)
	_, err = gr.getEndpointSlice(mirrorName, gr.namespace)
	if errors.IsNotFound(err) {
		log.Logger.Info("local endpointslice not found, creating", "namespace", gr.namespace, "name", mirrorName, "runner", gr.name)
		if _, err := gr.createEndpointSlice(mirrorName, gr.namespace, targetGlobalService, remoteEndpointSlice.AddressType, remoteEndpointSlice.Endpoints, remoteEndpointSlice.Ports); err != nil {
			return fmt.Errorf("creating endpointslice %s/%s: %w", gr.namespace, mirrorName, err)
		}
	} else if err != nil {
		return fmt.Errorf("getting endpointslice %s/%s: %w", gr.namespace, mirrorName, err)
	} else {
		log.Logger.Info("local endpointslice found, updating", "namespace", gr.namespace, "name", mirrorName, "runner", gr.name)
		if _, err := gr.updateEndpointSlice(mirrorName, gr.namespace, targetGlobalService, remoteEndpointSlice.AddressType, remoteEndpointSlice.Endpoints, remoteEndpointSlice.Ports); err != nil {
			return fmt.Errorf("updating endpointslice %s/%s: %w", gr.namespace, mirrorName, err)
		}
	}
	return nil
}

// EndpointSliceEventHandler adds EndpointSlice resource events to the
respective queue 401 | func (gr *GlobalRunner) EndpointSliceEventHandler(eventType watch.EventType, old *discoveryv1.EndpointSlice, new *discoveryv1.EndpointSlice) { 402 | switch eventType { 403 | case watch.Added: 404 | log.Logger.Debug("endpoints added", "namespace", new.Namespace, "name", new.Name, "runner", gr.name) 405 | gr.endpointSliceQueue.Add(new) 406 | case watch.Modified: 407 | log.Logger.Debug("endpoints modified", "namespace", new.Namespace, "name", new.Name, "runner", gr.name) 408 | gr.endpointSliceQueue.Add(new) 409 | case watch.Deleted: 410 | log.Logger.Debug("endpoints deleted", "namespace", old.Namespace, "name", old.Name, "runner", gr.name) 411 | gr.endpointSliceQueue.Add(old) 412 | default: 413 | log.Logger.Info("Unknown endpoints event received: %v", eventType, "runner", gr.name) 414 | } 415 | } 416 | -------------------------------------------------------------------------------- /global_runner_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 11 | v1 "k8s.io/api/core/v1" 12 | discoveryv1 "k8s.io/api/discovery/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/labels" 15 | "k8s.io/client-go/kubernetes/fake" 16 | "k8s.io/client-go/tools/cache" 17 | ) 18 | 19 | var ( 20 | testGlobalLabels = map[string]string{"global-svc": "true"} 21 | testGlobalSvcLabel = map[string]string{"mirror.semaphore.uw.io/global-service": "true"} 22 | testGlobalSvcLabelString = "mirror.semaphore.uw.io/global-service=true" 23 | testGlobalRoutingStrategyLabel = "mirror.semaphore.uw.io/global-service-routing-strategy=local-first" 24 | testServiceSelector = map[string]string{"selector": "x"} 25 | ) 26 | 27 | func TestAddSingleRemoteGlobalService(t *testing.T) { 28 | ctx, cancel := 
context.WithCancel(context.Background()) 29 | defer cancel() 30 | 31 | log.InitLogger("semaphore-service-mirror-test", "debug") 32 | fakeClient := fake.NewSimpleClientset() 33 | 34 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 35 | testSvc := &v1.Service{ 36 | ObjectMeta: metav1.ObjectMeta{ 37 | Name: "test-svc", 38 | Namespace: "remote-ns", 39 | Labels: testGlobalSvcLabel, 40 | }, 41 | Spec: v1.ServiceSpec{ 42 | Ports: testPorts, 43 | Selector: testServiceSelector, 44 | ClusterIP: "1.1.1.1", 45 | }, 46 | } 47 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 48 | testGlobalStore := newGlobalServiceStore() 49 | 50 | selector, _ := labels.Parse(testGlobalRoutingStrategyLabel) 51 | testRunner := newGlobalRunner( 52 | fakeClient, 53 | fakeWatchClient, 54 | "test-runner", 55 | "local-ns", 56 | testGlobalSvcLabelString, 57 | 60*time.Minute, 58 | testGlobalStore, 59 | false, 60 | selector, 61 | false, 62 | ) 63 | go testRunner.serviceWatcher.Run() 64 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 65 | 66 | // Test create cluster ip service - should create 1 service with no 67 | // cluster ip specified, the same ports and nil selector 68 | testRunner.reconcileGlobalService("test-svc", "remote-ns") 69 | 70 | expectedSpec := TestSpec{ 71 | Ports: testPorts, 72 | ClusterIP: "", 73 | Selector: nil, 74 | } 75 | expectedSvcs := []TestSvc{ 76 | TestSvc{ 77 | Name: fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator), 78 | Namespace: "local-ns", 79 | Spec: expectedSpec, 80 | Labels: testGlobalLabels, 81 | Annotations: map[string]string{ 82 | globalSvcClustersAnno: "test-runner", 83 | }, 84 | }, 85 | } 86 | assertExpectedGlobalServices(ctx, t, expectedSvcs, fakeClient) 87 | } 88 | 89 | func TestAddSingleRemoteGlobalHeadlessService(t *testing.T) { 90 | ctx, cancel := context.WithCancel(context.Background()) 91 | defer cancel() 92 | 93 | log.InitLogger("semaphore-service-mirror-test", "debug") 94 | fakeClient := 
fake.NewSimpleClientset() 95 | 96 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 97 | testSvc := &v1.Service{ 98 | ObjectMeta: metav1.ObjectMeta{ 99 | Name: "test-svc", 100 | Namespace: "remote-ns", 101 | Labels: testGlobalSvcLabel, 102 | }, 103 | Spec: v1.ServiceSpec{ 104 | Ports: testPorts, 105 | Selector: testServiceSelector, 106 | ClusterIP: "None", 107 | }, 108 | } 109 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 110 | testGlobalStore := newGlobalServiceStore() 111 | 112 | selector, _ := labels.Parse(testGlobalRoutingStrategyLabel) 113 | testRunner := newGlobalRunner( 114 | fakeClient, 115 | fakeWatchClient, 116 | "test-runner", 117 | "local-ns", 118 | testGlobalSvcLabelString, 119 | 60*time.Minute, 120 | testGlobalStore, 121 | false, 122 | selector, 123 | false, 124 | ) 125 | go testRunner.serviceWatcher.Run() 126 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 127 | 128 | // Test create headless service - should create 1 service with "None" 129 | // cluster ip, the same ports and nil selector 130 | testRunner.reconcileGlobalService("test-svc", "remote-ns") 131 | 132 | expectedSpec := TestSpec{ 133 | Ports: testPorts, 134 | ClusterIP: "None", 135 | Selector: nil, 136 | } 137 | expectedSvcs := []TestSvc{ 138 | TestSvc{ 139 | Name: fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator), 140 | Namespace: "local-ns", 141 | Spec: expectedSpec, 142 | Labels: testGlobalLabels, 143 | Annotations: map[string]string{ 144 | globalSvcClustersAnno: "test-runner", 145 | }, 146 | }, 147 | } 148 | assertExpectedGlobalServices(ctx, t, expectedSvcs, fakeClient) 149 | } 150 | 151 | func TestModifySingleRemoteGlobalService(t *testing.T) { 152 | ctx, cancel := context.WithCancel(context.Background()) 153 | defer cancel() 154 | log.InitLogger("semaphore-service-mirror-test", "debug") 155 | existingPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 156 | existingAnnotations := map[string]string{ 157 | 
globalSvcClustersAnno: "test-runner", 158 | kubeSeviceTopologyAwareHintsAnno: kubeSeviceTopologyAwareHintsAnnoVal, 159 | } 160 | existingSvc := &v1.Service{ 161 | ObjectMeta: metav1.ObjectMeta{ 162 | Name: fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator), 163 | Namespace: "local-ns", 164 | Labels: testGlobalLabels, 165 | Annotations: existingAnnotations, 166 | }, 167 | Spec: v1.ServiceSpec{ 168 | Ports: existingPorts, 169 | Selector: testServiceSelector, 170 | ClusterIP: "1.1.1.1", 171 | }, 172 | } 173 | fakeClient := fake.NewSimpleClientset(existingSvc) 174 | existingGlobalStore := newGlobalServiceStore() 175 | existingGlobalStore.store[fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator)] = &GlobalService{ 176 | name: "test-svc", 177 | namespace: "remote-ns", 178 | ports: existingPorts, 179 | labels: testGlobalLabels, 180 | annotations: existingAnnotations, 181 | clusters: []string{"test-runner"}, 182 | } 183 | 184 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 2}} 185 | testSvc := &v1.Service{ 186 | ObjectMeta: metav1.ObjectMeta{ 187 | Name: "test-svc", 188 | Namespace: "remote-ns", 189 | Labels: testGlobalSvcLabel, 190 | }, 191 | Spec: v1.ServiceSpec{ 192 | Ports: testPorts, 193 | Selector: testServiceSelector, 194 | ClusterIP: "1.1.1.1", 195 | }, 196 | } 197 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 198 | 199 | selector, _ := labels.Parse(testGlobalRoutingStrategyLabel) 200 | testRunner := newGlobalRunner( 201 | fakeClient, 202 | fakeWatchClient, 203 | "test-runner", 204 | "local-ns", 205 | testGlobalSvcLabelString, 206 | 60*time.Minute, 207 | existingGlobalStore, 208 | false, 209 | selector, 210 | false, 211 | ) 212 | go testRunner.serviceWatcher.Run() 213 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 214 | 215 | testRunner.reconcileGlobalService("test-svc", "remote-ns") 216 | // After reconciling we should see updated ports and drop the topology aware hints annotation 217 | expectedSpec := 
TestSpec{ 218 | Ports: testPorts, 219 | ClusterIP: "1.1.1.1", 220 | Selector: nil, 221 | } 222 | expectedSvcs := []TestSvc{ 223 | TestSvc{ 224 | Name: fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator), 225 | Namespace: "local-ns", 226 | Spec: expectedSpec, 227 | Labels: testGlobalLabels, 228 | Annotations: map[string]string{ 229 | globalSvcClustersAnno: "test-runner", 230 | }, 231 | }, 232 | } 233 | assertExpectedGlobalServices(ctx, t, expectedSvcs, fakeClient) 234 | } 235 | 236 | func TestAddGlobalServiceMultipleClusters(t *testing.T) { 237 | ctx, cancel := context.WithCancel(context.Background()) 238 | defer cancel() 239 | 240 | log.InitLogger("semaphore-service-mirror-test", "debug") 241 | fakeClient := fake.NewSimpleClientset() 242 | 243 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 244 | // Create a service with the same name and namespace in 2 clusters (A and B) 245 | testSvcA := &v1.Service{ 246 | ObjectMeta: metav1.ObjectMeta{ 247 | Name: "test-svc", 248 | Namespace: "remote-ns", 249 | Labels: testGlobalSvcLabel, 250 | }, 251 | Spec: v1.ServiceSpec{ 252 | Ports: testPorts, 253 | Selector: testServiceSelector, 254 | ClusterIP: "1.1.1.1", 255 | }, 256 | } 257 | testSvcB := &v1.Service{ 258 | ObjectMeta: metav1.ObjectMeta{ 259 | Name: "test-svc", 260 | Namespace: "remote-ns", 261 | Labels: testGlobalSvcLabel, 262 | }, 263 | Spec: v1.ServiceSpec{ 264 | Ports: testPorts, 265 | Selector: testServiceSelector, 266 | ClusterIP: "2.2.2.2", 267 | }, 268 | } 269 | fakeWatchClientA := fake.NewSimpleClientset(testSvcA) 270 | fakeWatchClientB := fake.NewSimpleClientset(testSvcB) 271 | testGlobalStore := newGlobalServiceStore() 272 | 273 | selector, _ := labels.Parse("mirror.semaphore.uw.io/test=true") 274 | testRunnerA := newGlobalRunner( 275 | fakeClient, 276 | fakeWatchClientA, 277 | "runnerA", 278 | "local-ns", 279 | testGlobalSvcLabelString, 280 | 60*time.Minute, 281 | testGlobalStore, 282 | false, 283 | selector, 284 | false, 285 | ) 286 | testRunnerB := 
newGlobalRunner( 287 | fakeClient, 288 | fakeWatchClientB, 289 | "runnerB", 290 | "local-ns", 291 | testGlobalSvcLabelString, 292 | 60*time.Minute, 293 | testGlobalStore, 294 | false, 295 | selector, 296 | false, 297 | ) 298 | 299 | go testRunnerA.serviceWatcher.Run() 300 | go testRunnerB.serviceWatcher.Run() 301 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunnerA.serviceWatcher.HasSynced) 302 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunnerB.serviceWatcher.HasSynced) 303 | 304 | expectedSpec := TestSpec{ 305 | Ports: testPorts, 306 | ClusterIP: "", 307 | Selector: nil, 308 | } 309 | expectedSvcs := []TestSvc{TestSvc{ 310 | Name: fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator), 311 | Namespace: "local-ns", 312 | Spec: expectedSpec, 313 | Labels: testGlobalLabels, 314 | Annotations: map[string]string{ 315 | globalSvcClustersAnno: "runnerA", 316 | }, 317 | }} 318 | 319 | testRunnerA.reconcileGlobalService("test-svc", "remote-ns") 320 | assertExpectedGlobalServices(ctx, t, expectedSvcs, fakeClient) 321 | 322 | // Reconciling the service from cluster B should only edit the respective label 323 | testRunnerB.reconcileGlobalService("test-svc", "remote-ns") 324 | expectedSvcs[0].Annotations[globalSvcClustersAnno] = "runnerA,runnerB" 325 | assertExpectedGlobalServices(ctx, t, expectedSvcs, fakeClient) 326 | } 327 | 328 | func TestDeleteGlobalServiceMultipleClusters(t *testing.T) { 329 | ctx, cancel := context.WithCancel(context.Background()) 330 | defer cancel() 331 | 332 | log.InitLogger("semaphore-service-mirror-test", "debug") 333 | 334 | existingPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 335 | existingSvc := &v1.Service{ 336 | ObjectMeta: metav1.ObjectMeta{ 337 | Name: fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator), 338 | Namespace: "local-ns", 339 | Labels: globalSvcLabels, 340 | Annotations: globalSvcAnnotations, 341 | }, 342 | Spec: v1.ServiceSpec{ 343 | Ports: existingPorts, 344 | ClusterIP: "", 345 | }, 346 
| } 347 | existingSvc.Annotations[globalSvcClustersAnno] = "runnerA,runnerB" 348 | 349 | fakeClient := fake.NewSimpleClientset(existingSvc) 350 | testGlobalStore := newGlobalServiceStore() 351 | // Add the existing service into global store from both clusters 352 | testLabels := globalSvcLabels 353 | annotations := globalSvcAnnotations 354 | annotations[globalSvcClustersAnno] = "runnerA,runnerB" 355 | testGlobalStore.store[fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator)] = &GlobalService{ 356 | name: "test-svc", 357 | namespace: "remote-ns", 358 | ports: existingPorts, 359 | labels: testLabels, 360 | annotations: annotations, 361 | clusters: []string{"runnerA", "runnerB"}, 362 | } 363 | 364 | // Remote fake clients won't have any services as we are deleting 365 | fakeWatchClientA := fake.NewSimpleClientset() 366 | fakeWatchClientB := fake.NewSimpleClientset() 367 | 368 | selector, _ := labels.Parse("mirror.semaphore.uw.io/test=true") 369 | testRunnerA := newGlobalRunner( 370 | fakeClient, 371 | fakeWatchClientA, 372 | "runnerA", 373 | "local-ns", 374 | testGlobalSvcLabelString, 375 | 60*time.Minute, 376 | testGlobalStore, 377 | false, 378 | selector, 379 | false, 380 | ) 381 | testRunnerB := newGlobalRunner( 382 | fakeClient, 383 | fakeWatchClientB, 384 | "runnerB", 385 | "local-ns", 386 | testGlobalSvcLabelString, 387 | 60*time.Minute, 388 | testGlobalStore, 389 | false, 390 | selector, 391 | false, 392 | ) 393 | 394 | go testRunnerA.serviceWatcher.Run() 395 | go testRunnerB.serviceWatcher.Run() 396 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunnerA.serviceWatcher.HasSynced) 397 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunnerB.serviceWatcher.HasSynced) 398 | 399 | expectedSpec := TestSpec{ 400 | Ports: existingPorts, 401 | ClusterIP: "", 402 | Selector: nil, 403 | } 404 | expectedSvcs := []TestSvc{TestSvc{ 405 | Name: fmt.Sprintf("gl-remote-ns-%s-test-svc", Separator), 406 | Namespace: "local-ns", 407 | Spec: 
expectedSpec, 408 | Labels: testGlobalLabels, 409 | Annotations: map[string]string{ 410 | globalSvcClustersAnno: "runnerA,runnerB", 411 | kubeSeviceTopologyAwareHintsAnno: kubeSeviceTopologyAwareHintsAnnoVal, 412 | }, 413 | }} 414 | assertExpectedGlobalServices(ctx, t, expectedSvcs, fakeClient) 415 | // Deleting the service from cluster A should only edit the respective label 416 | err := testRunnerA.reconcileGlobalService("test-svc", "remote-ns") 417 | assert.Equal(t, nil, err) 418 | expectedSvcs[0].Annotations[globalSvcClustersAnno] = "runnerB" 419 | assertExpectedGlobalServices(ctx, t, expectedSvcs, fakeClient) 420 | 421 | // Deleting the service from cluster B should delete the global service 422 | err = testRunnerB.reconcileGlobalService("test-svc", "remote-ns") 423 | assert.Equal(t, nil, err) 424 | assertExpectedServices(ctx, t, []TestSvc{}, fakeClient) 425 | } 426 | 427 | func TestEndpointSliceSync(t *testing.T) { 428 | log.InitLogger("semaphore-service-mirror-test", "debug") 429 | testMirrorLabels := map[string]string{ 430 | "mirrored-endpoint-slice": "true", 431 | "mirror-endpointslice-sync-name": "test-runner", 432 | } 433 | // EndpointSlice on the remote cluster 434 | testEndpointSlice := &discoveryv1.EndpointSlice{ 435 | ObjectMeta: metav1.ObjectMeta{ 436 | Name: "test-slice", 437 | Namespace: "remote-ns", 438 | Labels: testGlobalSvcLabel, 439 | }, 440 | } 441 | fakeWatchClient := fake.NewSimpleClientset(testEndpointSlice) 442 | 443 | // Create mirrored endpointslice 444 | mirroredEndpointSlice := &discoveryv1.EndpointSlice{ 445 | ObjectMeta: metav1.ObjectMeta{ 446 | Name: generateGlobalEndpointSliceName("test-slice"), 447 | Namespace: "local-ns", 448 | Labels: generateEndpointSliceLabels(testMirrorLabels, "test-svc"), 449 | }, 450 | } 451 | // Create stale endpointslice 452 | staleEndpointSlice := &v1.Service{ 453 | ObjectMeta: metav1.ObjectMeta{ 454 | Name: generateGlobalEndpointSliceName("old-slice"), 455 | Namespace: "local-ns", 456 | Labels: 
generateEndpointSliceLabels(testMirrorLabels, "test-svc"), 457 | }, 458 | } 459 | // feed them to the fake client 460 | fakeClient := fake.NewSimpleClientset(mirroredEndpointSlice, staleEndpointSlice) 461 | 462 | ctx, cancel := context.WithCancel(context.Background()) 463 | defer cancel() 464 | 465 | testGlobalStore := newGlobalServiceStore() 466 | selector, _ := labels.Parse(testGlobalRoutingStrategyLabel) 467 | testRunner := newGlobalRunner( 468 | fakeClient, 469 | fakeWatchClient, 470 | "test-runner", 471 | "local-ns", 472 | testGlobalSvcLabelString, 473 | 60*time.Minute, 474 | testGlobalStore, 475 | false, 476 | selector, 477 | true, 478 | ) 479 | go testRunner.endpointSliceWatcher.Run() 480 | go testRunner.mirrorEndpointSliceWatcher.Run() 481 | cache.WaitForNamedCacheSync(fmt.Sprintf("gl-%s-endpointSliceWatcher", testRunner.name), ctx.Done(), testRunner.endpointSliceWatcher.HasSynced) 482 | cache.WaitForNamedCacheSync(fmt.Sprintf("mirror-%s-endpointSliceWatcher", testRunner.name), ctx.Done(), testRunner.mirrorEndpointSliceWatcher.HasSynced) 483 | 484 | // EndpointSliceSync will trigger a sync. 
Verify that old endpointslice is deleted 485 | if err := testRunner.EndpointSliceSync(); err != nil { 486 | t.Fatal(err) 487 | } 488 | endpointslices, err := fakeClient.DiscoveryV1().EndpointSlices("").List( 489 | ctx, 490 | metav1.ListOptions{}, 491 | ) 492 | if err != nil { 493 | t.Fatal(err) 494 | } 495 | assert.Equal(t, 1, len(endpointslices.Items)) 496 | assert.Equal( 497 | t, 498 | generateGlobalEndpointSliceName("test-slice"), 499 | endpointslices.Items[0].Name, 500 | ) 501 | } 502 | -------------------------------------------------------------------------------- /globalservice.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | v1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | // GlobalService represents a global multicluster service 11 | type GlobalService struct { 12 | name string 13 | namespace string 14 | ports []v1.ServicePort 15 | headless bool 16 | labels map[string]string 17 | annotations map[string]string 18 | clusters []string 19 | } 20 | 21 | const ( 22 | kubeSeviceTopologyAwareHintsAnno = "service.kubernetes.io/topology-aware-hints" 23 | kubeSeviceTopologyAwareHintsAnnoVal = "auto" 24 | ) 25 | 26 | var ( 27 | globalSvcLabels = map[string]string{"global-svc": "true"} 28 | globalSvcAnnotations = map[string]string{kubeSeviceTopologyAwareHintsAnno: kubeSeviceTopologyAwareHintsAnnoVal} // Kube annotation to enable topolgy aware routing 29 | globalSvcClustersAnno = "global-svc-clusters" 30 | ) 31 | 32 | // GlobalServiceStore keeps a list of global services 33 | type GlobalServiceStore struct { 34 | store map[string]*GlobalService 35 | } 36 | 37 | func newGlobalServiceStore() *GlobalServiceStore { 38 | return &GlobalServiceStore{ 39 | store: make(map[string]*GlobalService), 40 | } 41 | } 42 | 43 | // AddOrUpdateClusterServiceTarget will append a cluster to the GlobalService 44 | // clusters list. 
In case there is no global service in the store, it creates 45 | // the GlobalService. 46 | func (gss *GlobalServiceStore) AddOrUpdateClusterServiceTarget(svc *v1.Service, cluster string, topologyAwareHints bool) (*GlobalService, error) { 47 | gsvcName := generateGlobalServiceName(svc.Name, svc.Namespace) 48 | gsvcAnnotations := map[string]string{} 49 | if topologyAwareHints { 50 | gsvcAnnotations[kubeSeviceTopologyAwareHintsAnno] = kubeSeviceTopologyAwareHintsAnnoVal 51 | } 52 | gsvc, ok := gss.store[gsvcName] 53 | // Add new service in the store if it doesn't exist 54 | if !ok { 55 | gsvc = &GlobalService{ 56 | name: svc.Name, 57 | namespace: svc.Namespace, 58 | ports: svc.Spec.Ports, 59 | headless: isHeadless(svc), 60 | labels: globalSvcLabels, 61 | annotations: gsvcAnnotations, 62 | clusters: []string{cluster}, 63 | } 64 | gsvc.annotations[globalSvcClustersAnno] = fmt.Sprintf("%s", cluster) 65 | gss.store[gsvcName] = gsvc 66 | return gsvc, nil 67 | } 68 | // If service exists, check and update global service 69 | if gsvc.headless != isHeadless(svc) { 70 | return nil, fmt.Errorf("Mismatch between existing headless service and requested") 71 | } 72 | if _, found := inSlice(gsvc.clusters, cluster); !found { 73 | gsvc.clusters = append(gsvc.clusters, cluster) 74 | } 75 | gsvcAnnotations[globalSvcClustersAnno] = strings.Join(gsvc.clusters, ",") 76 | gsvc.annotations = gsvcAnnotations 77 | gsvc.ports = svc.Spec.Ports 78 | return gsvc, nil 79 | } 80 | 81 | // DeleteClusterServiceTarget removes a cluster from the GlobalService's 82 | // clusters list. If the list is empty it deletes the GlobalService. 
Returns a 83 | // pointer to a GlobalService or nil if completely deleted 84 | func (gss *GlobalServiceStore) DeleteClusterServiceTarget(name, namespace, cluster string) *GlobalService { 85 | gsvcName := generateGlobalServiceName(name, namespace) 86 | gsvc, ok := gss.store[gsvcName] 87 | if !ok { 88 | return nil 89 | } 90 | if i, found := inSlice(gsvc.clusters, cluster); found { 91 | gsvc.clusters = removeFromSlice(gsvc.clusters, i) 92 | } 93 | gss.store[gsvcName] = gsvc 94 | if len(gsvc.clusters) == 0 { 95 | delete(gss.store, gsvcName) 96 | return nil 97 | } 98 | gsvc.annotations[globalSvcClustersAnno] = strings.Join(gsvc.clusters, ",") 99 | return gsvc 100 | } 101 | 102 | // Get returns a service from the store or errors 103 | func (gss *GlobalServiceStore) Get(name, namespace string) (*GlobalService, error) { 104 | gsvcName := generateGlobalServiceName(name, namespace) 105 | gsvc, ok := gss.store[gsvcName] 106 | if !ok { 107 | return nil, fmt.Errorf("not found") 108 | } 109 | return gsvc, nil 110 | } 111 | 112 | // Len returns the length of the list of services in store 113 | func (gss *GlobalServiceStore) Len() int { 114 | return len(gss.store) 115 | } 116 | -------------------------------------------------------------------------------- /globalservice_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | v1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | ) 11 | 12 | type testService struct { 13 | cluster string 14 | name string 15 | namespace string 16 | clusterIP string 17 | ports []int32 18 | } 19 | 20 | func createTestService(name, namespace, clusterIP string, ports []int32) *v1.Service { 21 | svcPorts := []v1.ServicePort{} 22 | for _, port := range ports { 23 | svcPorts = append(svcPorts, v1.ServicePort{Port: port}) 24 | } 25 | return &v1.Service{ 26 | ObjectMeta: metav1.ObjectMeta{ 27 | 
Name: name, 28 | Namespace: namespace, 29 | }, 30 | Spec: v1.ServiceSpec{ 31 | Ports: svcPorts, 32 | Selector: map[string]string{"selector": "x"}, 33 | ClusterIP: clusterIP, 34 | }, 35 | } 36 | } 37 | 38 | func createTestStore(t *testing.T, services []testService, topologyAwareHints bool) *GlobalServiceStore { 39 | store := newGlobalServiceStore() 40 | for _, s := range services { 41 | svc := createTestService(s.name, s.namespace, s.clusterIP, s.ports) 42 | _, err := store.AddOrUpdateClusterServiceTarget(svc, s.cluster, topologyAwareHints) 43 | assert.Equal(t, nil, err) 44 | } 45 | return store 46 | } 47 | 48 | func TestAddOrUpdateClusterServiceTarget_AddSingleServiceTarget(t *testing.T) { 49 | store := createTestStore(t, []testService{ 50 | testService{cluster: "cluster", name: "name", namespace: "namespace", clusterIP: "1.1.1.1", ports: []int32{80}}, 51 | }, false) 52 | assert.Equal(t, 1, store.Len()) 53 | gsvc, err := store.Get("name", "namespace") 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | assert.Equal(t, []string{"cluster"}, gsvc.clusters) 58 | } 59 | 60 | func TestAddOrUpdateClusterServiceTarget_AddMultipleServiceTargets(t *testing.T) { 61 | store := createTestStore(t, []testService{ 62 | testService{cluster: "a", name: "name", namespace: "namespace", clusterIP: "1.1.1.1", ports: []int32{80}}, 63 | testService{cluster: "b", name: "name", namespace: "namespace", clusterIP: "2.2.2.2", ports: []int32{80}}, 64 | testService{cluster: "c", name: "name", namespace: "namespace", clusterIP: "3.3.3.3", ports: []int32{80}}, 65 | }, false) 66 | assert.Equal(t, 1, store.Len()) 67 | gsvc, err := store.Get("name", "namespace") 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | assert.Equal(t, []string{"a", "b", "c"}, gsvc.clusters) 72 | } 73 | 74 | func TestAddOrUpdateClusterServiceTarget_AddMultipleServices(t *testing.T) { 75 | store := createTestStore(t, []testService{ 76 | testService{cluster: "a", name: "a", namespace: "a", clusterIP: "1.1.1.1", ports: 
[]int32{80}}, 77 | testService{cluster: "b", name: "b", namespace: "b", clusterIP: "2.2.2.2", ports: []int32{80}}, 78 | }, false) 79 | assert.Equal(t, 2, store.Len()) 80 | } 81 | 82 | func TestAddOrUpdateClusterServiceTarget_HeadlessMisMatch(t *testing.T) { 83 | store := newGlobalServiceStore() 84 | svcA := createTestService("name", "namespace", "1.1.1.1", []int32{80}) 85 | clusterA := "a" 86 | _, err := store.AddOrUpdateClusterServiceTarget(svcA, clusterA, false) 87 | assert.Equal(t, nil, err) 88 | svcB := createTestService("name", "namespace", "None", []int32{80}) 89 | clusterB := "b" 90 | _, err = store.AddOrUpdateClusterServiceTarget(svcB, clusterB, false) 91 | assert.Equal(t, fmt.Errorf("Mismatch between existing headless service and requested"), err) 92 | } 93 | 94 | func TestAddOrUpdateClusterServiceTarget_UpdateFungiblePorts(t *testing.T) { 95 | store := createTestStore(t, []testService{ 96 | testService{cluster: "a", name: "name", namespace: "namespace", clusterIP: "1.1.1.1", ports: []int32{80}}, 97 | }, false) 98 | svcB := createTestService("name", "namespace", "2.2.2.2", []int32{8080}) 99 | clusterB := "b" 100 | _, err := store.AddOrUpdateClusterServiceTarget(svcB, clusterB, false) 101 | if err != nil { 102 | t.Fatal(err) 103 | } 104 | assert.Equal(t, 1, store.Len()) 105 | gsvc, err := store.Get("name", "namespace") 106 | if err != nil { 107 | t.Fatal(err) 108 | } 109 | assert.Equal(t, []string{"a", "b"}, gsvc.clusters) 110 | assert.Equal(t, 1, len(gsvc.ports)) 111 | assert.Equal(t, int32(8080), gsvc.ports[0].Port) 112 | } 113 | 114 | func TestAddOrUpdateClusterServiceTarget_UpdateFungibleTopologyAnnotations(t *testing.T) { 115 | store := createTestStore(t, []testService{ 116 | testService{cluster: "a", name: "name", namespace: "namespace", clusterIP: "1.1.1.1", ports: []int32{80}}, 117 | }, true) 118 | gsvc, err := store.Get("name", "namespace") 119 | if err != nil { 120 | t.Fatal(err) 121 | } 122 | assert.Equal(t, 2, len(gsvc.annotations)) 123 | 
assert.Equal(t, kubeSeviceTopologyAwareHintsAnnoVal, gsvc.annotations[kubeSeviceTopologyAwareHintsAnno]) 124 | 125 | // Add a service with topolofy aware flag set to false 126 | svcB := createTestService("name", "namespace", "2.2.2.2", []int32{8080}) 127 | clusterB := "b" 128 | _, err = store.AddOrUpdateClusterServiceTarget(svcB, clusterB, false) 129 | if err != nil { 130 | t.Fatal(err) 131 | } 132 | // This should keep a single service in the store, but delete the 133 | // topology aware hints annotation 134 | assert.Equal(t, 1, store.Len()) 135 | gsvc, err = store.Get("name", "namespace") 136 | if err != nil { 137 | t.Fatal(err) 138 | } 139 | assert.Equal(t, []string{"a", "b"}, gsvc.clusters) 140 | assert.Equal(t, 1, len(gsvc.annotations)) 141 | } 142 | 143 | func TestDeleteClusterServiceTarget_DeleteServiceLastTarget(t *testing.T) { 144 | store := createTestStore(t, []testService{ 145 | testService{cluster: "cluster", name: "name", namespace: "namespace", clusterIP: "1.1.1.1", ports: []int32{80}}, 146 | }, false) 147 | assert.Equal(t, 1, store.Len()) 148 | svc := createTestService("name", "namespace", "1.1.1.1", []int32{80}) 149 | store.DeleteClusterServiceTarget(svc.Name, svc.Namespace, "cluster") 150 | assert.Equal(t, 0, store.Len()) 151 | } 152 | 153 | func TestDeleteClusterServiceTarget_DeleteServiceTarget(t *testing.T) { 154 | store := createTestStore(t, []testService{ 155 | testService{cluster: "a", name: "name", namespace: "namespace", clusterIP: "1.1.1.1", ports: []int32{80}}, 156 | testService{cluster: "b", name: "name", namespace: "namespace", clusterIP: "2.2.2.2", ports: []int32{80}}, 157 | }, false) 158 | assert.Equal(t, 1, store.Len()) 159 | gsvc, err := store.Get("name", "namespace") 160 | if err != nil { 161 | t.Fatal(err) 162 | } 163 | assert.Equal(t, []string{"a", "b"}, gsvc.clusters) 164 | svcA := createTestService("name", "namespace", "1.1.1.1", []int32{80}) 165 | store.DeleteClusterServiceTarget(svcA.Name, svcA.Namespace, "a") 166 | 
assert.Equal(t, 1, store.Len()) 167 | assert.Equal(t, []string{"b"}, gsvc.clusters) 168 | } 169 | 170 | func TestDeleteClusterServiceTarget_NotPresent(t *testing.T) { 171 | store := createTestStore(t, []testService{ 172 | testService{cluster: "cluster", name: "name", namespace: "namespace", clusterIP: "1.1.1.1", ports: []int32{80}}, 173 | }, false) 174 | assert.Equal(t, 1, store.Len()) 175 | svcB := createTestService("b", "b", "2.2.2.2", []int32{80}) 176 | clusterB := "b" 177 | store.DeleteClusterServiceTarget(svcB.Name, svcB.Namespace, clusterB) 178 | assert.Equal(t, 1, store.Len()) 179 | } 180 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/utilitywarehouse/semaphore-service-mirror 2 | 3 | go 1.24.0 4 | 5 | toolchain go1.24.1 6 | 7 | require ( 8 | github.com/hashicorp/go-hclog v1.6.3 9 | github.com/prometheus/client_golang v1.22.0 10 | github.com/stretchr/testify v1.10.0 11 | k8s.io/api v0.33.1 12 | k8s.io/apimachinery v0.33.1 13 | k8s.io/client-go v0.33.1 14 | ) 15 | 16 | require ( 17 | github.com/beorn7/perks v1.0.1 // indirect 18 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 19 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 20 | github.com/emicklei/go-restful/v3 v3.11.3 // indirect 21 | github.com/fatih/color v1.16.0 // indirect 22 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 23 | github.com/go-logr/logr v1.4.2 // indirect 24 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 25 | github.com/go-openapi/jsonreference v0.20.4 // indirect 26 | github.com/go-openapi/swag v0.23.0 // indirect 27 | github.com/gogo/protobuf v1.3.2 // indirect 28 | github.com/google/gnostic-models v0.6.9 // indirect 29 | github.com/google/go-cmp v0.7.0 // indirect 30 | github.com/google/uuid v1.6.0 // indirect 31 | github.com/josharian/intern v1.0.0 // indirect 32 | github.com/json-iterator/go 
v1.1.12 // indirect 33 | github.com/mailru/easyjson v0.7.7 // indirect 34 | github.com/mattn/go-colorable v0.1.13 // indirect 35 | github.com/mattn/go-isatty v0.0.20 // indirect 36 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 37 | github.com/modern-go/reflect2 v1.0.2 // indirect 38 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 39 | github.com/pkg/errors v0.9.1 // indirect 40 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 41 | github.com/prometheus/client_model v0.6.1 // indirect 42 | github.com/prometheus/common v0.62.0 // indirect 43 | github.com/prometheus/procfs v0.15.1 // indirect 44 | github.com/spf13/pflag v1.0.5 // indirect 45 | github.com/x448/float16 v0.8.4 // indirect 46 | golang.org/x/net v0.38.0 // indirect 47 | golang.org/x/oauth2 v0.27.0 // indirect 48 | golang.org/x/sys v0.31.0 // indirect 49 | golang.org/x/term v0.30.0 // indirect 50 | golang.org/x/text v0.23.0 // indirect 51 | golang.org/x/time v0.9.0 // indirect 52 | google.golang.org/protobuf v1.36.5 // indirect 53 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 54 | gopkg.in/inf.v0 v0.9.1 // indirect 55 | gopkg.in/yaml.v3 v3.0.1 // indirect 56 | k8s.io/klog/v2 v2.130.1 // indirect 57 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect 58 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect 59 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect 60 | sigs.k8s.io/randfill v1.0.0 // indirect 61 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect 62 | sigs.k8s.io/yaml v1.4.0 // indirect 63 | ) 64 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= 2 | github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= 3 | 
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= 4 | github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= 5 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 6 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 7 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 8 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 9 | github.com/emicklei/go-restful/v3 v3.11.3 h1:yagOQz/38xJmcNeZJtrUcKjkHRltIaIFXKWeG1SkWGE= 10 | github.com/emicklei/go-restful/v3 v3.11.3/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= 11 | github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= 12 | github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= 13 | github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= 14 | github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= 15 | github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= 16 | github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= 17 | github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= 18 | github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= 19 | github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= 20 | github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= 21 | github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= 22 | github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= 23 | github.com/go-openapi/swag v0.23.0/go.mod 
h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 24 | github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= 25 | github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= 26 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 27 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 28 | github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= 29 | github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= 30 | github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= 31 | github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 32 | github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= 33 | github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= 34 | github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= 35 | github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= 36 | github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 37 | github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 38 | github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= 39 | github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= 40 | github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= 41 | github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= 42 | github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 43 | github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 44 | github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 45 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 46 | github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= 47 | github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= 48 | github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= 49 | github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= 50 | github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= 51 | github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 52 | github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= 53 | github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= 54 | github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= 55 | github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 56 | github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= 57 | github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= 58 | github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= 59 | github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= 60 | github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= 61 | github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= 62 | github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= 63 | github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= 64 | github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= 65 | github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 66 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 67 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 68 | github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= 69 | github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= 70 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= 71 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 72 | github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= 73 | github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= 74 | github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= 75 | github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= 76 | github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 77 | github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 78 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 79 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 80 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 81 | github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= 82 | github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= 83 | github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= 84 | github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= 85 | 
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= 86 | github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= 87 | github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= 88 | github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= 89 | github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= 90 | github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 91 | github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= 92 | github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 93 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 94 | github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= 95 | github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= 96 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 97 | github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= 98 | github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= 99 | github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= 100 | github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= 101 | github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= 102 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 103 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 104 | go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= 105 | go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= 106 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 107 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 108 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 109 | golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 110 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 111 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 112 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 113 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 114 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 115 | golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= 116 | golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= 117 | golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= 118 | golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= 119 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 120 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 121 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 122 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 123 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 124 | golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 125 | golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 126 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 127 | golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 128 | golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 129 | golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 130 | golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 131 | golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 132 | golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= 133 | golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= 134 | golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= 135 | golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= 136 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 137 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 138 | golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= 139 | golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= 140 | golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= 141 | golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= 142 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 143 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 144 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 145 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 146 | 
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= 147 | golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= 148 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 149 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 150 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 151 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 152 | google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 153 | google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= 154 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 155 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 156 | gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 157 | gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= 158 | gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= 159 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 160 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 161 | gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= 162 | gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 163 | k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= 164 | k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= 165 | k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= 166 | k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= 167 | 
k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= 168 | k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= 169 | k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= 170 | k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= 171 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= 172 | k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= 173 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= 174 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= 175 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= 176 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= 177 | sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 178 | sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= 179 | sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= 180 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= 181 | sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= 182 | sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= 183 | sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= 184 | -------------------------------------------------------------------------------- /kube/client.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | 7 | "crypto/tls" 8 | "crypto/x509" 9 | "io/ioutil" 10 | "net/http" 11 | 12 | "k8s.io/client-go/kubernetes" 13 | 
"k8s.io/client-go/rest" 14 | "k8s.io/client-go/tools/clientcmd" 15 | // in case of local kube config 16 | // _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" 17 | ) 18 | 19 | // certMan resolves the remote cluster's CA at connection time by fetching the PEM bundle from caURL. type certMan struct { 20 | caURL string 21 | } 22 | 23 | // verifyConn is installed as tls.Config.VerifyConnection: it downloads the CA bundle from caURL and verifies the peer's leaf certificate against it. NOTE(review): http.Get carries no timeout or context, so an unresponsive CA endpoint can stall the TLS handshake indefinitely — confirm this is acceptable. func (cm *certMan) verifyConn(cs tls.ConnectionState) error { 24 | resp, err := http.Get(cm.caURL) 25 | if err != nil { 26 | return fmt.Errorf("error getting remote CA from %s: %v", cm.caURL, err) 27 | } 28 | // Drain and close the body so the underlying connection can be reused. defer func() { 29 | io.Copy(ioutil.Discard, resp.Body) 30 | resp.Body.Close() 31 | }() 32 | if resp.StatusCode != http.StatusOK { 33 | return fmt.Errorf("expected %d response from %s, got %d", http.StatusOK, cm.caURL, resp.StatusCode) 34 | } 35 | body, err := ioutil.ReadAll(resp.Body) 36 | if err != nil { 37 | return fmt.Errorf("error reading response body from %s: %v", cm.caURL, err) 38 | } 39 | roots := x509.NewCertPool() 40 | ok := roots.AppendCertsFromPEM(body) 41 | if !ok { 42 | return fmt.Errorf("failed to parse root certificate from %s", cm.caURL) 43 | } 44 | opts := x509.VerifyOptions{ 45 | DNSName: cs.ServerName, 46 | Roots: roots, 47 | } 48 | // Only the leaf certificate is verified; no intermediates are supplied — NOTE(review): confirm the remote API server chains directly to the fetched CA. _, err = cs.PeerCertificates[0].Verify(opts) 49 | return err 50 | } 51 | 52 | // Client returns a Kubernetes client (clientset) from token, apiURL and caURL 53 | func Client(token, apiURL, caURL string) (*kubernetes.Clientset, error) { 54 | cm := &certMan{caURL} 55 | conf := &rest.Config{ 56 | Host: apiURL, 57 | // InsecureSkipVerify is deliberate: standard chain verification is replaced by certMan.verifyConn above, which checks the peer against the dynamically fetched CA. Transport: &http.Transport{ 58 | TLSClientConfig: &tls.Config{ 59 | InsecureSkipVerify: true, 60 | VerifyConnection: cm.verifyConn}}, 61 | BearerToken: token, 62 | } 63 | return kubernetes.NewForConfig(conf) 64 | } 65 | 66 | // ClientFromConfig returns a Kubernetes client (clientset) from the kubeconfig 67 | // path or from the in-cluster service account environment.
68 | func ClientFromConfig(path string) (*kubernetes.Clientset, error) { 69 | conf, err := getClientConfig(path) 70 | if err != nil { 71 | return nil, fmt.Errorf("failed to get Kubernetes client config: %v", err) 72 | } 73 | return kubernetes.NewForConfig(conf) 74 | } 75 | 76 | // getClientConfig returns a Kubernetes client Config. An empty path selects the in-cluster service account configuration. 77 | func getClientConfig(path string) (*rest.Config, error) { 78 | if path != "" { 79 | // build Config from a kubeconfig filepath 80 | return clientcmd.BuildConfigFromFlags("", path) 81 | } 82 | // uses pod's service account to get a Config 83 | return rest.InClusterConfig() 84 | } 85 | -------------------------------------------------------------------------------- /kube/common.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "context" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | "k8s.io/client-go/kubernetes" 9 | ) 10 | 11 | // GetService returns a client get request for a service under a namespace 12 | func GetService(ctx context.Context, client kubernetes.Interface, name, namespace string) (*v1.Service, error) { 13 | return client.CoreV1().Services(namespace).Get( 14 | ctx, 15 | name, 16 | metav1.GetOptions{}, 17 | ) 18 | } 19 | 20 | // CreateService creates a clusterIP or headless type service.
21 | // The Selector is intentionally nil — presumably endpoints of mirrored services are written directly rather than selected from pods; confirm against the runner code. func CreateService(ctx context.Context, client kubernetes.Interface, name, namespace string, labels, annotations map[string]string, ports []v1.ServicePort, headless bool) (*v1.Service, error) { 22 | svc := &v1.Service{ 23 | ObjectMeta: metav1.ObjectMeta{ 24 | Name: name, 25 | Namespace: namespace, 26 | Labels: labels, 27 | Annotations: annotations, 28 | }, 29 | Spec: v1.ServiceSpec{ 30 | Ports: ports, 31 | Selector: nil, 32 | }, 33 | } 34 | // ClusterIP "None" marks the service headless (no virtual IP allocated). if headless { 35 | svc.Spec.ClusterIP = "None" 36 | } 37 | return client.CoreV1().Services(namespace).Create( 38 | ctx, 39 | svc, 40 | metav1.CreateOptions{}, 41 | ) 42 | } 43 | 44 | // UpdateService updates service ports. No need to cater for headless services 45 | // as clusterIP field is immutable and will fail. 46 | func UpdateService(ctx context.Context, client kubernetes.Interface, service *v1.Service, ports []v1.ServicePort) (*v1.Service, error) { 47 | // Re-assert ports and a nil selector on the passed object before the update call. service.Spec.Ports = ports 48 | service.Spec.Selector = nil 49 | 50 | return client.CoreV1().Services(service.Namespace).Update( 51 | ctx, 52 | service, 53 | metav1.UpdateOptions{}, 54 | ) 55 | } 56 | 57 | // DeleteService returns a client delete service request 58 | func DeleteService(ctx context.Context, client kubernetes.Interface, name, namespace string) error { 59 | return client.CoreV1().Services(namespace).Delete( 60 | ctx, 61 | name, 62 | metav1.DeleteOptions{}, 63 | ) 64 | } 65 | -------------------------------------------------------------------------------- /kube/endpoints_watcher.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | v1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/tools/cache" 15 | 16 |
"github.com/utilitywarehouse/semaphore-service-mirror/log" 17 | "github.com/utilitywarehouse/semaphore-service-mirror/metrics" 18 | ) 19 | 20 | // EndpointsEventHandler is the callback invoked for every Endpoints event; old is nil for Added events and new is nil for Deleted events. type EndpointsEventHandler = func(eventType watch.EventType, old *v1.Endpoints, new *v1.Endpoints) 21 | 22 | // EndpointsWatcher runs an informer over Endpoints objects matching labelSelector in namespace and forwards events to eventHandler. type EndpointsWatcher struct { 23 | ctx context.Context 24 | client kubernetes.Interface 25 | resyncPeriod time.Duration 26 | stopChannel chan struct{} 27 | store cache.Store 28 | controller cache.Controller 29 | eventHandler EndpointsEventHandler 30 | labelSelector string 31 | name string 32 | namespace string 33 | runner string // Name of the parent runner of the watcher. Used for metrics to distinguish series. 34 | } 35 | 36 | // NewEndpointsWatcher builds a watcher; Init must be called before Run. func NewEndpointsWatcher(name string, client kubernetes.Interface, resyncPeriod time.Duration, handler EndpointsEventHandler, labelSelector, namespace, runner string) *EndpointsWatcher { 37 | return &EndpointsWatcher{ 38 | ctx: context.Background(), 39 | client: client, 40 | resyncPeriod: resyncPeriod, 41 | stopChannel: make(chan struct{}), 42 | eventHandler: handler, 43 | labelSelector: labelSelector, 44 | name: name, 45 | namespace: namespace, 46 | runner: runner, 47 | } 48 | } 49 | 50 | // Init wires the list/watch functions and event handlers into an informer. List/watch errors are logged here and also returned to the informer, which retries. func (ew *EndpointsWatcher) Init() { 51 | listWatch := &cache.ListWatch{ 52 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { 53 | options.LabelSelector = ew.labelSelector 54 | l, err := ew.client.CoreV1().Endpoints(ew.namespace).List(ew.ctx, options) 55 | if err != nil { 56 | log.Logger.Error("endpoints list error", "watcher", ew.name, "err", err) 57 | } 58 | return l, err 59 | }, 60 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 61 | options.LabelSelector = ew.labelSelector 62 | w, err := ew.client.CoreV1().Endpoints(ew.namespace).Watch(ew.ctx, options) 63 | if err != nil { 64 | log.Logger.Error("endpoints watch error", "watcher", ew.name, "err", err) 65 | } 66 | return w, err 67 | }, 68 | } 69 | eventHandler := cache.ResourceEventHandlerFuncs{ 70 | AddFunc:
func(obj interface{}) { 71 | ew.handleEvent(watch.Added, nil, obj.(*v1.Endpoints)) 72 | }, 73 | UpdateFunc: func(oldObj, newObj interface{}) { 74 | ew.handleEvent(watch.Modified, oldObj.(*v1.Endpoints), newObj.(*v1.Endpoints)) 75 | }, 76 | // NOTE(review): obj may be cache.DeletedFinalStateUnknown when the informer misses a delete; this unchecked type assertion would panic in that case — confirm and consider a checked assertion. DeleteFunc: func(obj interface{}) { 77 | ew.handleEvent(watch.Deleted, obj.(*v1.Endpoints), nil) 78 | }, 79 | } 80 | ew.store, ew.controller = cache.NewInformer(listWatch, &v1.Endpoints{}, ew.resyncPeriod, eventHandler) 81 | } 82 | 83 | // handleEvent records metrics for the event and the current cache size, then forwards the event to the configured callback, if any. func (ew *EndpointsWatcher) handleEvent(eventType watch.EventType, oldObj, newObj *v1.Endpoints) { 84 | metrics.IncKubeWatcherEvents(ew.name, "endpoints", ew.runner, eventType) 85 | metrics.SetKubeWatcherObjects(ew.name, "endpoints", ew.runner, float64(len(ew.store.List()))) 86 | 87 | if ew.eventHandler != nil { 88 | ew.eventHandler(eventType, oldObj, newObj) 89 | } 90 | } 91 | 92 | // Run blocks, driving the informer until Stop closes the stop channel. func (ew *EndpointsWatcher) Run() { 93 | log.Logger.Info("starting endpoints watcher", "watcher", ew.name) 94 | // Running controller will block until writing on the stop channel.
95 | ew.controller.Run(ew.stopChannel) 96 | log.Logger.Info("stopped endpoints watcher", "watcher", ew.name) 97 | } 98 | 99 | // Stop signals Run to return by closing the stop channel; it must only be called once. func (ew *EndpointsWatcher) Stop() { 100 | log.Logger.Info("stopping endpoints watcher", "watcher", ew.name) 101 | close(ew.stopChannel) 102 | } 103 | 104 | // Get returns the cached Endpoints for name/namespace, or a standard NotFound error so callers can use errors.IsNotFound. func (ew *EndpointsWatcher) Get(name, namespace string) (*v1.Endpoints, error) { 105 | key := namespace + "/" + name 106 | 107 | obj, exists, err := ew.store.GetByKey(key) 108 | if err != nil { 109 | return nil, err 110 | } 111 | if !exists { 112 | return nil, errors.NewNotFound(v1.Resource("endpoints"), key) 113 | } 114 | 115 | return obj.(*v1.Endpoints), nil 116 | } 117 | 118 | // List returns every Endpoints object currently in the informer cache. NOTE(review): unlike EndpointSliceWatcher, this type exposes no HasSynced — confirm callers do not need to wait for the initial sync. func (ew *EndpointsWatcher) List() ([]*v1.Endpoints, error) { 119 | var endpoints []*v1.Endpoints 120 | for _, obj := range ew.store.List() { 121 | e, ok := obj.(*v1.Endpoints) 122 | if !ok { 123 | return nil, fmt.Errorf("unexpected object in store: %+v", obj) 124 | } 125 | endpoints = append(endpoints, e) 126 | } 127 | return endpoints, nil 128 | } 129 | -------------------------------------------------------------------------------- /kube/endpointslice_watcher.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | discoveryv1 "k8s.io/api/discovery/v1" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/tools/cache" 15 | 16 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 17 | "github.com/utilitywarehouse/semaphore-service-mirror/metrics" 18 | ) 19 | 20 | // EndpointSliceEventHandler is the callback for EndpointSlice events; old is nil for Added events and new is nil for Deleted events. type EndpointSliceEventHandler = func(eventType watch.EventType, old *discoveryv1.EndpointSlice, new *discoveryv1.EndpointSlice) 21 | 22 | // EndpointSliceWatcher runs an informer over EndpointSlice objects matching labelSelector in namespace. type EndpointSliceWatcher struct { 23 | ctx context.Context 24 | client kubernetes.Interface 25 | resyncPeriod time.Duration 26 |
stopChannel chan struct{} 27 | store cache.Store 28 | controller cache.Controller 29 | eventHandler EndpointSliceEventHandler 30 | labelSelector string 31 | name string 32 | namespace string 33 | runner string // Name of the parent runner of the watcher. Used for metrics to distinguish series. 34 | } 35 | 36 | // NewEndpointSliceWatcher builds a watcher; Init must be called before Run. func NewEndpointSliceWatcher(name string, client kubernetes.Interface, resyncPeriod time.Duration, handler EndpointSliceEventHandler, labelSelector, namespace, runner string) *EndpointSliceWatcher { 37 | return &EndpointSliceWatcher{ 38 | ctx: context.Background(), 39 | client: client, 40 | resyncPeriod: resyncPeriod, 41 | stopChannel: make(chan struct{}), 42 | eventHandler: handler, 43 | labelSelector: labelSelector, 44 | name: name, 45 | namespace: namespace, 46 | runner: runner, 47 | } 48 | } 49 | 50 | // Init wires the list/watch functions and event handlers into an informer. List/watch errors are logged here and also returned to the informer, which retries. (Dead empty else branches after the error logs were removed for consistency with the sibling Endpoints/Service watchers.) func (esw *EndpointSliceWatcher) Init() { 51 | listWatch := &cache.ListWatch{ 52 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { 53 | options.LabelSelector = esw.labelSelector 54 | l, err := esw.client.DiscoveryV1().EndpointSlices(esw.namespace).List(esw.ctx, options) 55 | if err != nil { 56 | log.Logger.Error("EndpointSlice list error", "watcher", esw.name, "err", err) 57 | } 59 | return l, err 60 | }, 61 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 62 | options.LabelSelector = esw.labelSelector 63 | w, err := esw.client.DiscoveryV1().EndpointSlices(esw.namespace).Watch(esw.ctx, options) 64 | if err != nil { 65 | log.Logger.Error("EndpointSlice watch error", "watcher", esw.name, "err", err) 66 | } 68 | return w, err 69 | }, 70 | } 71 | eventHandler := cache.ResourceEventHandlerFuncs{ 72 | AddFunc: func(obj interface{}) { 73 | esw.handleEvent(watch.Added, nil, obj.(*discoveryv1.EndpointSlice)) 74 | }, 75 | UpdateFunc: func(oldObj, newObj interface{}) { 76 | esw.handleEvent(watch.Modified, oldObj.(*discoveryv1.EndpointSlice), newObj.(*discoveryv1.EndpointSlice)) 77 | }, 78 |
DeleteFunc: func(obj interface{}) { 79 | esw.handleEvent(watch.Deleted, obj.(*discoveryv1.EndpointSlice), nil) 80 | }, 81 | } 82 | esw.store, esw.controller = cache.NewInformer(listWatch, &discoveryv1.EndpointSlice{}, esw.resyncPeriod, eventHandler) 83 | } 84 | 85 | // handleEvent records event and cache-size metrics, then forwards the event to the configured callback, if any. func (esw *EndpointSliceWatcher) handleEvent(eventType watch.EventType, oldObj, newObj *discoveryv1.EndpointSlice) { 86 | metrics.IncKubeWatcherEvents(esw.name, "endpointslice", esw.runner, eventType) 87 | metrics.SetKubeWatcherObjects(esw.name, "endpointslice", esw.runner, float64(len(esw.store.List()))) 88 | 89 | if esw.eventHandler != nil { 90 | esw.eventHandler(eventType, oldObj, newObj) 91 | } 92 | } 93 | 94 | // Run blocks, driving the informer until Stop closes the stop channel. func (esw *EndpointSliceWatcher) Run() { 95 | log.Logger.Info("starting endpointslice watcher", "watcher", esw.name) 96 | // Running controller will block until writing on the stop channel. 97 | esw.controller.Run(esw.stopChannel) 98 | log.Logger.Info("stopped endpointslice watcher", "watcher", esw.name) 99 | } 100 | 101 | // Stop signals Run to return by closing the stop channel. (Log message fixed: it previously said "endpoints watcher", which belongs to EndpointsWatcher and made these series indistinguishable in logs.) func (esw *EndpointSliceWatcher) Stop() { 102 | log.Logger.Info("stopping endpointslice watcher", "watcher", esw.name) 103 | close(esw.stopChannel) 104 | } 105 | 106 | // HasSynced reports whether the informer has completed its initial list. func (esw *EndpointSliceWatcher) HasSynced() bool { 107 | return esw.controller.HasSynced() 108 | } 109 | 110 | // Get returns the cached EndpointSlice for name/namespace, or a standard NotFound error so callers can use errors.IsNotFound. func (esw *EndpointSliceWatcher) Get(name, namespace string) (*discoveryv1.EndpointSlice, error) { 111 | key := namespace + "/" + name 112 | 113 | obj, exists, err := esw.store.GetByKey(key) 114 | if err != nil { 115 | return nil, err 116 | } 117 | if !exists { 118 | return nil, errors.NewNotFound(discoveryv1.Resource("endpointslice"), key) 119 | } 120 | 121 | return obj.(*discoveryv1.EndpointSlice), nil 122 | } 123 | 124 | // List returns every EndpointSlice currently in the informer cache. func (esw *EndpointSliceWatcher) List() ([]*discoveryv1.EndpointSlice, error) { 125 | var endpointslice []*discoveryv1.EndpointSlice 126 | for _, obj := range esw.store.List() { 127 | e, ok := obj.(*discoveryv1.EndpointSlice) 128 | if !ok { 129 | return nil, fmt.Errorf("unexpected
object in store: %+v", obj) 130 | } 131 | endpointslice = append(endpointslice, e) 132 | } 133 | return endpointslice, nil 134 | } 135 | -------------------------------------------------------------------------------- /kube/service_watcher.go: -------------------------------------------------------------------------------- 1 | package kube 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | v1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/tools/cache" 15 | 16 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 17 | "github.com/utilitywarehouse/semaphore-service-mirror/metrics" 18 | ) 19 | 20 | type ServiceEventHandler = func(eventType watch.EventType, old *v1.Service, new *v1.Service) 21 | 22 | type ServiceWatcher struct { 23 | ctx context.Context 24 | client kubernetes.Interface 25 | resyncPeriod time.Duration 26 | stopChannel chan struct{} 27 | store cache.Store 28 | controller cache.Controller 29 | eventHandler ServiceEventHandler 30 | labelSelector string 31 | name string 32 | namespace string 33 | runner string // Name of the parent runner of the watcher. Used for metrics to distinguish series. 
34 | } 35 | 36 | func NewServiceWatcher(name string, client kubernetes.Interface, resyncPeriod time.Duration, handler ServiceEventHandler, labelSelector, namespace, runner string) *ServiceWatcher { 37 | return &ServiceWatcher{ 38 | ctx: context.Background(), 39 | client: client, 40 | resyncPeriod: resyncPeriod, 41 | stopChannel: make(chan struct{}), 42 | eventHandler: handler, 43 | labelSelector: labelSelector, 44 | name: name, 45 | namespace: namespace, 46 | runner: runner, 47 | } 48 | } 49 | 50 | func (sw *ServiceWatcher) Init() { 51 | listWatch := &cache.ListWatch{ 52 | ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { 53 | options.LabelSelector = sw.labelSelector 54 | l, err := sw.client.CoreV1().Services(sw.namespace).List(sw.ctx, options) 55 | if err != nil { 56 | log.Logger.Error("service list error", "watcher", sw.name, "err", err) 57 | } 58 | return l, err 59 | }, 60 | WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { 61 | options.LabelSelector = sw.labelSelector 62 | w, err := sw.client.CoreV1().Services(sw.namespace).Watch(sw.ctx, options) 63 | if err != nil { 64 | log.Logger.Error("service watch error", "watcher", sw.name, "err", err) 65 | } 66 | return w, err 67 | }, 68 | } 69 | eventHandler := cache.ResourceEventHandlerFuncs{ 70 | AddFunc: func(obj interface{}) { 71 | sw.handleEvent(watch.Added, nil, obj.(*v1.Service)) 72 | }, 73 | UpdateFunc: func(oldObj, newObj interface{}) { 74 | sw.handleEvent(watch.Modified, oldObj.(*v1.Service), newObj.(*v1.Service)) 75 | }, 76 | DeleteFunc: func(obj interface{}) { 77 | sw.handleEvent(watch.Deleted, obj.(*v1.Service), nil) 78 | }, 79 | } 80 | sw.store, sw.controller = cache.NewInformer(listWatch, &v1.Service{}, sw.resyncPeriod, eventHandler) 81 | } 82 | 83 | func (sw *ServiceWatcher) handleEvent(eventType watch.EventType, oldObj, newObj *v1.Service) { 84 | metrics.IncKubeWatcherEvents(sw.name, "service", sw.runner, eventType) 85 | metrics.SetKubeWatcherObjects(sw.name, 
"service", sw.runner, float64(len(sw.store.List()))) 86 | 87 | if sw.eventHandler != nil { 88 | sw.eventHandler(eventType, oldObj, newObj) 89 | } 90 | } 91 | 92 | func (sw *ServiceWatcher) Run() { 93 | log.Logger.Info("starting service watcher", "watcher", sw.name) 94 | // Running controller will block until writing on the stop channel. 95 | sw.controller.Run(sw.stopChannel) 96 | log.Logger.Info("stopped service watcher", "watcher", sw.name) 97 | } 98 | 99 | func (sw *ServiceWatcher) Stop() { 100 | log.Logger.Info("stopping service watcher", "watcher", sw.name) 101 | close(sw.stopChannel) 102 | } 103 | 104 | func (sw *ServiceWatcher) HasSynced() bool { 105 | return sw.controller.HasSynced() 106 | } 107 | 108 | func (sw *ServiceWatcher) Get(name, namespace string) (*v1.Service, error) { 109 | key := namespace + "/" + name 110 | 111 | obj, exists, err := sw.store.GetByKey(key) 112 | if err != nil { 113 | return nil, err 114 | } 115 | if !exists { 116 | return nil, errors.NewNotFound(v1.Resource("service"), key) 117 | } 118 | 119 | return obj.(*v1.Service), nil 120 | } 121 | 122 | func (sw *ServiceWatcher) List() ([]*v1.Service, error) { 123 | var svcs []*v1.Service 124 | for _, obj := range sw.store.List() { 125 | svc, ok := obj.(*v1.Service) 126 | if !ok { 127 | return nil, fmt.Errorf("unexpected object in store: %+v", obj) 128 | } 129 | svcs = append(svcs, svc) 130 | } 131 | return svcs, nil 132 | } 133 | -------------------------------------------------------------------------------- /log/logger.go: -------------------------------------------------------------------------------- 1 | package log 2 | 3 | import ( 4 | hclog "github.com/hashicorp/go-hclog" 5 | ) 6 | 7 | // Logger - Application wide logger obj 8 | var Logger hclog.Logger 9 | 10 | // InitLogger - a logger for application wide use 11 | func InitLogger(name, logLevel string) { 12 | Logger = hclog.New(&hclog.LoggerOptions{ 13 | Name: name, 14 | Level: hclog.LevelFromString(logLevel), 15 | }) 16 | } 17 | 
-------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "net/http" 7 | "os" 8 | "regexp" 9 | "strings" 10 | 11 | "github.com/prometheus/client_golang/prometheus/promhttp" 12 | "github.com/utilitywarehouse/semaphore-service-mirror/backoff" 13 | "github.com/utilitywarehouse/semaphore-service-mirror/kube" 14 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 15 | _ "github.com/utilitywarehouse/semaphore-service-mirror/metrics" 16 | "k8s.io/apimachinery/pkg/labels" 17 | "k8s.io/client-go/kubernetes" 18 | ) 19 | 20 | var ( 21 | flagGlobalSvcLabelSelector = flag.String("global-svc-label-selector", getEnv("SSM_GLOBAL_SVC_LABEL_SELECTOR", ""), "Label to mark watched services as global services") 22 | flagGlobalSvcRoutingStrategyLabel = flag.String("global-svc-routing-strategy-label", getEnv("SSM_GLOBAL_SVC_TOPOLOGY_LABEL", ""), "Label to instruct whether to try topology aware routing for global services") 23 | flagKubeConfigPath = flag.String("kube-config", getEnv("SSM_KUBE_CONFIG", ""), "Path of a kube config file, if not provided the app will try to get in cluster config") 24 | flagLogLevel = flag.String("log-level", getEnv("SSM_LOG_LEVEL", "info"), "Log level") 25 | flagMirrorNamespace = flag.String("mirror-ns", getEnv("SSM_MIRROR_NS", ""), "The namespace to create dummy mirror services in") 26 | flagMirrorSvcLabelSelector = flag.String("mirror-svc-label-selector", getEnv("SSM_MIRROR_SVC_LABEL_SELECTOR", ""), "Label of services and endpoints to watch and mirror") 27 | flagSSMConfig = flag.String("config", getEnv("SSM_CONFIG", ""), "(required)Path to the json config file") 28 | 29 | bearerRe = regexp.MustCompile(`[A-Z|a-z0-9\-\._~\+\/]+=*`) 30 | ) 31 | 32 | func usage() { 33 | flag.Usage() 34 | os.Exit(1) 35 | } 36 | 37 | func getEnv(key, defaultValue string) string { 38 | value := 
os.Getenv(key) 39 | if len(value) == 0 { 40 | return defaultValue 41 | } 42 | return value 43 | } 44 | 45 | func main() { 46 | var err error 47 | flag.Parse() 48 | log.InitLogger("semaphore-service-mirror", *flagLogLevel) 49 | 50 | // Config file path cannot be empty 51 | if *flagSSMConfig == "" { 52 | usage() 53 | } 54 | fileContent, err := os.ReadFile(*flagSSMConfig) 55 | if err != nil { 56 | log.Logger.Error("Cannot read config file", "err", err) 57 | os.Exit(1) 58 | } 59 | config, err := parseConfig( 60 | fileContent, 61 | *flagGlobalSvcLabelSelector, 62 | *flagGlobalSvcRoutingStrategyLabel, 63 | *flagMirrorSvcLabelSelector, 64 | *flagMirrorNamespace, 65 | ) 66 | if err != nil { 67 | log.Logger.Error("Cannot parse config", "err", err) 68 | os.Exit(1) 69 | } 70 | // set DefaultLocalEndpointZones value for topology aware routing 71 | setLocalEndpointZones(config.LocalCluster.Zones) 72 | // parse strategy label for setting topology aware hints. 73 | routingStrategyLabel, err := labels.Parse(config.Global.GlobalSvcRoutingStrategyLabel) 74 | if err != nil { 75 | log.Logger.Error( 76 | "Cannot parse the configured topology label for global services", 77 | "err", err, 78 | ) 79 | os.Exit(1) 80 | } 81 | 82 | // Get a kube client for the local cluster 83 | homeClient, err := kube.ClientFromConfig(*flagKubeConfigPath) 84 | if err != nil { 85 | log.Logger.Error( 86 | "cannot create kube client for local cluster", 87 | "err", err, 88 | ) 89 | usage() 90 | } 91 | 92 | gst := newGlobalServiceStore() 93 | gr := makeGlobalRunner(homeClient, homeClient, config.LocalCluster.Name, config.Global, gst, true, routingStrategyLabel) 94 | go func() { backoff.Retry(gr.Run, "start runner") }() 95 | runners := []Runner{gr} 96 | for _, remote := range config.RemoteClusters { 97 | remoteClient, err := makeRemoteKubeClientFromConfig(remote) 98 | if err != nil { 99 | log.Logger.Error("cannot create kube client for remotecluster", "err", err) 100 | os.Exit(1) 101 | } 102 | mr := 
makeMirrorRunner(homeClient, remoteClient, remote, config.Global) 103 | runners = append(runners, mr) 104 | go func() { backoff.Retry(mr.Run, "start mirror runner") }() 105 | gr := makeGlobalRunner(homeClient, remoteClient, remote.Name, config.Global, gst, false, routingStrategyLabel) 106 | runners = append(runners, gr) 107 | go func() { backoff.Retry(gr.Run, "start mirror runner") }() 108 | } 109 | 110 | listenAndServe(runners) 111 | // Stop runners before finishing 112 | for _, r := range runners { 113 | r.Stop() 114 | } 115 | } 116 | 117 | func listenAndServe(runners []Runner) { 118 | sm := http.NewServeMux() 119 | sm.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) { 120 | // A meaningful health check would be to verify that all runners 121 | // have started or kick the app otherwise via a liveness probe. 122 | // Client errors should be monitored via metrics. 123 | for _, r := range runners { 124 | if !r.Initialised() { 125 | w.WriteHeader(http.StatusServiceUnavailable) 126 | return 127 | } 128 | } 129 | w.WriteHeader(http.StatusOK) 130 | }) 131 | sm.Handle("/metrics", promhttp.Handler()) 132 | log.Logger.Error( 133 | "Listen and Serve", 134 | "err", http.ListenAndServe(":8080", sm), 135 | ) 136 | } 137 | 138 | func makeRemoteKubeClientFromConfig(remote *remoteClusterConfig) (*kubernetes.Clientset, error) { 139 | if remote.KubeConfigPath != "" { 140 | return kube.ClientFromConfig(remote.KubeConfigPath) 141 | } 142 | // If kubeconfig path is not set, try to use craft it from the rest of the config 143 | data, err := os.ReadFile(remote.RemoteSATokenPath) 144 | if err != nil { 145 | return nil, fmt.Errorf("Cannot read file: %s: %v", remote.RemoteSATokenPath, err) 146 | } 147 | saToken := string(data) 148 | if saToken != "" { 149 | saToken = strings.TrimSpace(saToken) 150 | if !bearerRe.MatchString(saToken) { 151 | return nil, fmt.Errorf("The provided token does not match regex: %s", bearerRe.String()) 152 | } 153 | } 154 | return 
kube.Client(saToken, remote.RemoteAPIURL, remote.RemoteCAURL) 155 | } 156 | 157 | func makeMirrorRunner(homeClient, remoteClient *kubernetes.Clientset, remote *remoteClusterConfig, global globalConfig) *MirrorRunner { 158 | return newMirrorRunner( 159 | homeClient, 160 | remoteClient, 161 | remote.Name, 162 | global.MirrorNamespace, 163 | remote.ServicePrefix, 164 | global.MirrorSvcLabelSelector, 165 | // Resync will trigger an onUpdate event for everything that is 166 | // stored in cache. 167 | remote.ResyncPeriod.Duration, 168 | global.ServiceSync, 169 | ) 170 | } 171 | 172 | func makeGlobalRunner(homeClient, remoteClient *kubernetes.Clientset, name string, global globalConfig, gst *GlobalServiceStore, localCluster bool, routingStrategyLabel labels.Selector) *GlobalRunner { 173 | return newGlobalRunner( 174 | homeClient, 175 | remoteClient, 176 | name, 177 | global.MirrorNamespace, 178 | global.GlobalSvcLabelSelector, 179 | // TODO: Need to specify resync period? 180 | 0, 181 | gst, 182 | localCluster, 183 | routingStrategyLabel, 184 | global.EndpointSliceSync, 185 | ) 186 | } 187 | -------------------------------------------------------------------------------- /metrics/kube_client.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "context" 5 | "net/url" 6 | "time" 7 | 8 | "github.com/prometheus/client_golang/prometheus" 9 | "k8s.io/client-go/tools/metrics" 10 | ) 11 | 12 | var ( 13 | kubeClientRequests = prometheus.NewCounterVec(prometheus.CounterOpts{ 14 | Name: "semaphore_service_mirror_kube_http_request_total", 15 | Help: "Total number of HTTP requests to the Kubernetes API by host, code and method", 16 | }, 17 | []string{"host", "code", "method"}, 18 | ) 19 | kubeClientRequestsDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ 20 | Name: "semaphore_service_mirror_kube_http_request_duration_seconds", 21 | Help: "Histogram of latencies for HTTP requests to the Kubernetes 
API by host and method", 22 | }, 23 | []string{"host", "method"}, 24 | ) 25 | ) 26 | 27 | func init() { 28 | (&kubeClientRequestAdapter{}).Register() 29 | } 30 | 31 | // kubeClientRequestAdapter implements metrics interfaces provided by client-go 32 | type kubeClientRequestAdapter struct{} 33 | 34 | // Register registers the adapter 35 | func (a *kubeClientRequestAdapter) Register() { 36 | metrics.Register( 37 | metrics.RegisterOpts{ 38 | RequestLatency: a, 39 | RequestResult: a, 40 | }, 41 | ) 42 | prometheus.MustRegister( 43 | kubeClientRequests, 44 | kubeClientRequestsDuration, 45 | ) 46 | 47 | } 48 | 49 | // Increment implements metrics.ResultMetric 50 | func (a kubeClientRequestAdapter) Increment(ctx context.Context, code string, method string, host string) { 51 | kubeClientRequests.With(prometheus.Labels{ 52 | "code": code, 53 | "method": method, 54 | "host": host, 55 | }).Inc() 56 | } 57 | 58 | // Observe implements metrics.LatencyMetric 59 | func (a kubeClientRequestAdapter) Observe(ctx context.Context, method string, u url.URL, latency time.Duration) { 60 | kubeClientRequestsDuration.With(prometheus.Labels{ 61 | "host": u.Host, 62 | "method": method, 63 | }).Observe(latency.Seconds()) 64 | } 65 | -------------------------------------------------------------------------------- /metrics/kube_watcher.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "k8s.io/apimachinery/pkg/watch" 6 | ) 7 | 8 | var ( 9 | kubeWatcherObjects = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 10 | Name: "semaphore_service_mirror_kube_watcher_objects", 11 | Help: "Number of objects watched, by watcher and kind", 12 | }, 13 | []string{"watcher", "kind", "runner"}, 14 | ) 15 | kubeWatcherEvents = prometheus.NewGaugeVec(prometheus.GaugeOpts{ 16 | Name: "semaphore_service_mirror_kube_watcher_events_total", 17 | Help: "Number of events handled, by watcher, kind 
and event_type", 18 | }, 19 | []string{"watcher", "kind", "event_type", "runner"}, 20 | ) 21 | ) 22 | 23 | func init() { 24 | prometheus.MustRegister( 25 | kubeWatcherObjects, 26 | kubeWatcherEvents, 27 | ) 28 | } 29 | 30 | func IncKubeWatcherEvents(watcher, kind, runner string, eventType watch.EventType) { 31 | kubeWatcherEvents.With(prometheus.Labels{ 32 | "watcher": watcher, 33 | "kind": kind, 34 | "event_type": string(eventType), 35 | "runner": runner, 36 | }).Inc() 37 | } 38 | 39 | func SetKubeWatcherObjects(watcher, kind, runner string, v float64) { 40 | kubeWatcherObjects.With(prometheus.Labels{ 41 | "watcher": watcher, 42 | "kind": kind, 43 | "runner": runner, 44 | }).Set(v) 45 | } 46 | -------------------------------------------------------------------------------- /metrics/queue.go: -------------------------------------------------------------------------------- 1 | package metrics 2 | 3 | import ( 4 | "github.com/prometheus/client_golang/prometheus" 5 | "k8s.io/client-go/util/workqueue" 6 | ) 7 | 8 | var ( 9 | queueDepth = prometheus.NewGaugeVec( 10 | prometheus.GaugeOpts{ 11 | Name: "semaphore_service_mirror_queue_depth", 12 | Help: "Workqueue depth, by queue name", 13 | }, 14 | []string{"name"}, 15 | ) 16 | queueAdds = prometheus.NewCounterVec( 17 | prometheus.CounterOpts{ 18 | Name: "semaphore_service_mirror_queue_adds_total", 19 | Help: "Workqueue adds, by queue name", 20 | }, 21 | []string{"name"}, 22 | ) 23 | queueLatency = prometheus.NewHistogramVec( 24 | prometheus.HistogramOpts{ 25 | Name: "semaphore_service_mirror_queue_latency_duration_seconds", 26 | Help: "Workqueue latency, by queue name", 27 | }, 28 | []string{"name"}, 29 | ) 30 | queueWorkDuration = prometheus.NewHistogramVec( 31 | prometheus.HistogramOpts{ 32 | Name: "semaphore_service_mirror_queue_work_duration_seconds", 33 | Help: "Workqueue work duration, by queue name", 34 | }, 35 | []string{"name"}, 36 | ) 37 | queueUnfinishedWork = prometheus.NewGaugeVec( 38 | prometheus.GaugeOpts{ 
39 | Name: "semaphore_service_mirror_queue_unfinished_work_seconds", 40 | Help: "Unfinished work in seconds, by queue name", 41 | }, 42 | []string{"name"}, 43 | ) 44 | queueLongestRunningProcessor = prometheus.NewGaugeVec( 45 | prometheus.GaugeOpts{ 46 | Name: "semaphore_service_mirror_queue_longest_running_processor_seconds", 47 | Help: "Longest running processor, by queue name", 48 | }, 49 | []string{"name"}, 50 | ) 51 | queueRetries = prometheus.NewCounterVec( 52 | prometheus.CounterOpts{ 53 | Name: "semaphore_service_mirror_queue_retries_total", 54 | Help: "Workqueue retries, by queue name", 55 | }, 56 | []string{"name"}, 57 | ) 58 | queueRequeued = prometheus.NewGaugeVec( 59 | prometheus.GaugeOpts{ 60 | Name: "semaphore_service_mirror_queue_requeued_items", 61 | Help: "Items that have been requeued but not reconciled yet, by queue name", 62 | }, 63 | []string{"name"}, 64 | ) 65 | ) 66 | 67 | func init() { 68 | prometheus.MustRegister( 69 | queueDepth, 70 | queueAdds, 71 | queueLatency, 72 | queueWorkDuration, 73 | queueUnfinishedWork, 74 | queueLongestRunningProcessor, 75 | queueRetries, 76 | queueRequeued, 77 | ) 78 | workqueue.SetProvider(&workqueueProvider{}) 79 | } 80 | 81 | // SetRequeued updates the number of requeued items 82 | func SetRequeued(name string, val float64) { 83 | queueRequeued.With(prometheus.Labels{"name": name}).Set(val) 84 | } 85 | 86 | // workqueueProvider implements workqueue.MetricsProvider 87 | type workqueueProvider struct{} 88 | 89 | // NewDepthMetric returns a gauge which tracks the depth of a queue 90 | func (p *workqueueProvider) NewDepthMetric(name string) workqueue.GaugeMetric { 91 | return queueDepth.With(prometheus.Labels{"name": name}) 92 | } 93 | 94 | // NewAddsMetrics returns a counter which tracks the adds to a queue 95 | func (p *workqueueProvider) NewAddsMetric(name string) workqueue.CounterMetric { 96 | return queueAdds.With(prometheus.Labels{"name": name}) 97 | } 98 | 99 | // NewLatencyMetric returns a histogram 
which tracks the latency of a queue 100 | func (p *workqueueProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { 101 | return queueLatency.With(prometheus.Labels{"name": name}) 102 | } 103 | 104 | // NewWorkDurationMetric returns a histogram which tracks the time a queue 105 | // spends working 106 | func (p *workqueueProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric { 107 | return queueWorkDuration.With(prometheus.Labels{"name": name}) 108 | } 109 | 110 | // NewUnfinishedWorkSecondsMetric returns a gauge which tracks the time spent 111 | // doing work that hasn't finished yet for a queue 112 | func (p *workqueueProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric { 113 | return queueUnfinishedWork.With(prometheus.Labels{"name": name}) 114 | } 115 | 116 | // NewLongestRunningProcessorSecondsMetric returns a gauge which tracks the 117 | // duration of the longest running processor for a queue 118 | func (p *workqueueProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric { 119 | return queueLongestRunningProcessor.With(prometheus.Labels{"name": name}) 120 | } 121 | 122 | // NewRetriesMetric returns a counter which tracks the number of retries for a 123 | // queue 124 | func (p *workqueueProvider) NewRetriesMetric(name string) workqueue.CounterMetric { 125 | return queueRetries.With(prometheus.Labels{"name": name}) 126 | } 127 | -------------------------------------------------------------------------------- /mirror_runner.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | v1 "k8s.io/api/core/v1" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/apimachinery/pkg/labels" 12 | "k8s.io/apimachinery/pkg/watch" 13 | "k8s.io/client-go/kubernetes" 14 | "k8s.io/client-go/tools/cache" 15 | 16 | 
"github.com/utilitywarehouse/semaphore-service-mirror/kube" 17 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 18 | ) 19 | 20 | // MirrorRunner watches a remote cluster and mirrors services and endpoints locally 21 | type MirrorRunner struct { 22 | ctx context.Context 23 | client kubernetes.Interface 24 | serviceQueue *queue 25 | serviceWatcher *kube.ServiceWatcher 26 | mirrorServiceWatcher *kube.ServiceWatcher 27 | endpointsQueue *queue 28 | endpointsWatcher *kube.EndpointsWatcher 29 | mirrorEndpointsWatcher *kube.EndpointsWatcher 30 | mirrorLabels map[string]string 31 | name string 32 | namespace string 33 | prefix string 34 | labelselector string 35 | sync bool 36 | initialised bool // Flag to turn on after the successful initialisation of the runner. 37 | } 38 | 39 | func newMirrorRunner(client, watchClient kubernetes.Interface, name, namespace, prefix, labelselector string, resyncPeriod time.Duration, sync bool) *MirrorRunner { 40 | mirrorLabels := map[string]string{ 41 | "mirrored-svc": "true", 42 | "mirror-svc-prefix-sync": prefix, 43 | } 44 | runner := &MirrorRunner{ 45 | ctx: context.Background(), 46 | client: client, 47 | name: name, 48 | namespace: namespace, 49 | prefix: prefix, 50 | sync: sync, 51 | mirrorLabels: mirrorLabels, 52 | initialised: false, 53 | } 54 | runner.serviceQueue = newQueue(fmt.Sprintf("%s-service", name), runner.reconcileService) 55 | runner.endpointsQueue = newQueue(fmt.Sprintf("%s-endpoints", name), runner.reconcileEndpoints) 56 | runnerName := fmt.Sprintf("mirror-%s", name) 57 | 58 | // Create and initialize a service watcher 59 | serviceWatcher := kube.NewServiceWatcher( 60 | fmt.Sprintf("%s-serviceWatcher", name), 61 | watchClient, 62 | resyncPeriod, 63 | runner.ServiceEventHandler, 64 | labelselector, 65 | metav1.NamespaceAll, 66 | runnerName, 67 | ) 68 | runner.serviceWatcher = serviceWatcher 69 | runner.serviceWatcher.Init() 70 | 71 | // Create and initialize a service watcher for mirrored services 72 | 
mirrorServiceWatcher := kube.NewServiceWatcher( 73 | fmt.Sprintf("%s-mirrorServiceWatcher", name), 74 | client, 75 | resyncPeriod, 76 | nil, 77 | labels.Set(mirrorLabels).String(), 78 | namespace, 79 | runnerName, 80 | ) 81 | runner.mirrorServiceWatcher = mirrorServiceWatcher 82 | runner.mirrorServiceWatcher.Init() 83 | 84 | // Create and initialize an endpoints watcher 85 | endpointsWatcher := kube.NewEndpointsWatcher( 86 | fmt.Sprintf("%s-endpointsWatcher", name), 87 | watchClient, 88 | resyncPeriod, 89 | runner.EndpointsEventHandler, 90 | labelselector, 91 | metav1.NamespaceAll, 92 | runnerName, 93 | ) 94 | runner.endpointsWatcher = endpointsWatcher 95 | runner.endpointsWatcher.Init() 96 | 97 | // Create and initialize an endpoints watcher for mirrored endpoints 98 | mirrorEndpointsWatcher := kube.NewEndpointsWatcher( 99 | fmt.Sprintf("%s-mirrorEndpointsWatcher", name), 100 | client, 101 | resyncPeriod, 102 | nil, 103 | labels.Set(mirrorLabels).String(), 104 | namespace, 105 | runnerName, 106 | ) 107 | runner.mirrorEndpointsWatcher = mirrorEndpointsWatcher 108 | runner.mirrorEndpointsWatcher.Init() 109 | 110 | return runner 111 | } 112 | 113 | // Run starts the watchers and queues of the runner 114 | func (mr *MirrorRunner) Run() error { 115 | go mr.serviceWatcher.Run() 116 | go mr.mirrorServiceWatcher.Run() 117 | // At this point the runner should be considered initialised and live. 118 | mr.initialised = true 119 | // wait for service watcher to sync before starting the endpoints to 120 | // avoid race between them. 
TODO: atm dummy and could run forever if 121 | // services cache fails to sync 122 | stopCh := make(chan struct{}) 123 | if ok := cache.WaitForNamedCacheSync("serviceWatcher", stopCh, mr.serviceWatcher.HasSynced); !ok { 124 | return fmt.Errorf("failed to wait for service caches to sync") 125 | } 126 | if ok := cache.WaitForNamedCacheSync("mirrorServiceWatcher", stopCh, mr.mirrorServiceWatcher.HasSynced); !ok { 127 | return fmt.Errorf("failed to wait for mirror service caches to sync") 128 | } 129 | 130 | // After services store syncs, perform a sync to delete stale mirrors 131 | if mr.sync { 132 | log.Logger.Info("Syncing services", "runner", mr.name) 133 | if err := mr.ServiceSync(); err != nil { 134 | log.Logger.Warn( 135 | "Error syncing services, skipping..", 136 | "err", err, 137 | "runner", mr.name, 138 | ) 139 | } 140 | } 141 | go mr.endpointsWatcher.Run() 142 | go mr.mirrorEndpointsWatcher.Run() 143 | 144 | go mr.serviceQueue.Run() 145 | go mr.endpointsQueue.Run() 146 | 147 | return nil 148 | } 149 | 150 | // Stop stops watchers and runners 151 | func (mr *MirrorRunner) Stop() { 152 | mr.serviceQueue.Stop() 153 | mr.serviceWatcher.Stop() 154 | mr.mirrorServiceWatcher.Stop() 155 | mr.endpointsQueue.Stop() 156 | mr.endpointsWatcher.Stop() 157 | mr.mirrorEndpointsWatcher.Stop() 158 | } 159 | 160 | // Initialised returns true when the runner is successfully initialised 161 | func (mr *MirrorRunner) Initialised() bool { 162 | return mr.initialised 163 | } 164 | 165 | func (mr *MirrorRunner) reconcileService(name, namespace string) error { 166 | mirrorName := generateMirrorName(mr.prefix, namespace, name) 167 | 168 | // Get the remote service 169 | log.Logger.Info("getting remote service", "namespace", namespace, "name", name, "runner", mr.name) 170 | remoteSvc, err := mr.getRemoteService(name, namespace) 171 | if errors.IsNotFound(err) { 172 | // If the remote service doesn't exist, clean up the local mirror service (if it 173 | // exists) 174 | 
log.Logger.Info("remote service not found, deleting local service", "namespace", mr.namespace, "name", mirrorName, "runner", mr.name) 175 | if err := kube.DeleteService(mr.ctx, mr.client, mirrorName, mr.namespace); err != nil && !errors.IsNotFound(err) { 176 | return fmt.Errorf("deleting service %s/%s: %v", mr.namespace, mirrorName, err) 177 | } 178 | return nil 179 | } else if err != nil { 180 | return fmt.Errorf("getting remote service: %v", err) 181 | } 182 | 183 | // If the mirror service doesn't exist, create it. Otherwise, update it. 184 | mirrorSvc, err := kube.GetService(mr.ctx, mr.client, mirrorName, mr.namespace) 185 | if errors.IsNotFound(err) { 186 | log.Logger.Info("local service not found, creating service", "namespace", mr.namespace, "name", mirrorName, "runner", mr.name) 187 | if _, err := kube.CreateService(mr.ctx, mr.client, mirrorName, mr.namespace, mr.mirrorLabels, map[string]string{}, remoteSvc.Spec.Ports, isHeadless(remoteSvc)); err != nil { 188 | return fmt.Errorf("creating service %s/%s: %v", mr.namespace, mirrorName, err) 189 | } 190 | } else if err != nil { 191 | return fmt.Errorf("getting service %s/%s: %v", mr.namespace, mirrorName, err) 192 | } else { 193 | log.Logger.Info("local service found, updating service", "namespace", mr.namespace, "name", mirrorName, "runner", mr.name) 194 | if _, err := kube.UpdateService(mr.ctx, mr.client, mirrorSvc, remoteSvc.Spec.Ports); err != nil { 195 | return fmt.Errorf("updating service %s/%s: %v", mr.namespace, mirrorName, err) 196 | } 197 | } 198 | 199 | return nil 200 | } 201 | 202 | func (mr *MirrorRunner) getRemoteService(name, namespace string) (*v1.Service, error) { 203 | return mr.serviceWatcher.Get(name, namespace) 204 | } 205 | 206 | // ServiceSync checks for stale mirrors (services) under the local namespace and 207 | // deletes them 208 | func (mr *MirrorRunner) ServiceSync() error { 209 | storeSvcs, err := mr.serviceWatcher.List() 210 | if err != nil { 211 | return err 212 | } 213 | 214 | 
mirrorSvcList := []string{} 215 | for _, svc := range storeSvcs { 216 | mirrorSvcList = append( 217 | mirrorSvcList, 218 | generateMirrorName(mr.prefix, svc.Namespace, svc.Name), 219 | ) 220 | } 221 | 222 | currSvcs, err := mr.mirrorServiceWatcher.List() 223 | if err != nil { 224 | return err 225 | } 226 | 227 | for _, svc := range currSvcs { 228 | _, inSlice := inSlice(mirrorSvcList, svc.Name) 229 | if !inSlice { 230 | log.Logger.Info( 231 | "Deleting old service and related endpoint", 232 | "service", svc.Name, 233 | "runner", mr.name, 234 | ) 235 | // Deleting a service should also clear the related 236 | // endpoints 237 | if err := kube.DeleteService(mr.ctx, mr.client, svc.Name, mr.namespace); err != nil { 238 | log.Logger.Error( 239 | "Error clearing service", 240 | "service", svc.Name, 241 | "err", err, 242 | "runner", mr.name, 243 | ) 244 | return err 245 | } 246 | } 247 | } 248 | return nil 249 | } 250 | 251 | // ServiceEventHandler adds Service resource events to the respective queue 252 | func (mr *MirrorRunner) ServiceEventHandler(eventType watch.EventType, old *v1.Service, new *v1.Service) { 253 | switch eventType { 254 | case watch.Added: 255 | log.Logger.Debug("service added", "namespace", new.Namespace, "name", new.Name, "runner", mr.name) 256 | mr.serviceQueue.Add(new) 257 | case watch.Modified: 258 | log.Logger.Debug("service modified", "namespace", new.Namespace, "name", new.Name, "runner", mr.name) 259 | mr.serviceQueue.Add(new) 260 | case watch.Deleted: 261 | log.Logger.Debug("service deleted", "namespace", old.Namespace, "name", old.Name, "runner", mr.name) 262 | mr.serviceQueue.Add(old) 263 | default: 264 | log.Logger.Info("Unknown service event received: %v", eventType, "runner", mr.name) 265 | } 266 | } 267 | 268 | func (mr *MirrorRunner) reconcileEndpoints(name, namespace string) error { 269 | mirrorName := generateMirrorName(mr.prefix, namespace, name) 270 | 271 | // Get the remote endpoints 272 | log.Logger.Info("getting remote 
endpoints", "namespace", namespace, "name", name, "runner", mr.name) 273 | remoteEndpoints, err := mr.getRemoteEndpoints(name, namespace) 274 | if errors.IsNotFound(err) { 275 | log.Logger.Info("remote endpoints not found, removing local endpoints", "namespace", namespace, "name", name, "runner", mr.name) 276 | if err := mr.deleteEndpoints(mirrorName, mr.namespace); err != nil && !errors.IsNotFound(err) { 277 | return fmt.Errorf("deleting endpoints %s/%s: %v", mr.namespace, mirrorName, err) 278 | } 279 | return nil 280 | } else if err != nil { 281 | return fmt.Errorf("getting remote endpoints %s/%s: %v", namespace, name, err) 282 | } 283 | 284 | // If the mirror endpoints doesn't exist, create it. Otherwise, update it. 285 | log.Logger.Info("getting local endpoints", "namespace", mr.namespace, "name", mirrorName, "runner", mr.name) 286 | _, err = mr.getEndpoints(mirrorName, mr.namespace) 287 | if errors.IsNotFound(err) { 288 | log.Logger.Info("local endpoints not found, creating endpoints", "namespace", mr.namespace, "name", mirrorName, "runner", mr.name) 289 | if _, err := mr.createEndpoints(mirrorName, mr.namespace, mr.mirrorLabels, remoteEndpoints.Subsets); err != nil { 290 | return fmt.Errorf("creating endpoints %s/%s: %v", mr.namespace, mirrorName, err) 291 | } 292 | } else if err != nil { 293 | return fmt.Errorf("getting endpoints %s/%s: %v", mr.namespace, mirrorName, err) 294 | } else { 295 | log.Logger.Info("local endpoints found, updating endpoints", "namespace", mr.namespace, "name", mirrorName, "runner", mr.name) 296 | if _, err := mr.updateEndpoints(mirrorName, mr.namespace, mr.mirrorLabels, remoteEndpoints.Subsets); err != nil { 297 | return fmt.Errorf("updating endpoints %s/%s: %v", mr.namespace, mirrorName, err) 298 | } 299 | } 300 | return nil 301 | } 302 | 303 | func (mr *MirrorRunner) getRemoteEndpoints(name, namespace string) (*v1.Endpoints, error) { 304 | return mr.endpointsWatcher.Get(name, namespace) 305 | } 306 | 307 | func (mr *MirrorRunner) 
getEndpoints(name, namespace string) (*v1.Endpoints, error) { 308 | return mr.client.CoreV1().Endpoints(namespace).Get( 309 | mr.ctx, 310 | name, 311 | metav1.GetOptions{}, 312 | ) 313 | } 314 | 315 | func (mr *MirrorRunner) createEndpoints(name, namespace string, labels map[string]string, subsets []v1.EndpointSubset) (*v1.Endpoints, error) { 316 | return mr.client.CoreV1().Endpoints(namespace).Create( 317 | mr.ctx, 318 | &v1.Endpoints{ 319 | ObjectMeta: metav1.ObjectMeta{ 320 | Name: name, 321 | Namespace: namespace, 322 | Labels: labels, 323 | }, 324 | Subsets: subsets, 325 | }, 326 | metav1.CreateOptions{}, 327 | ) 328 | } 329 | 330 | func (mr *MirrorRunner) updateEndpoints(name, namespace string, labels map[string]string, subsets []v1.EndpointSubset) (*v1.Endpoints, error) { 331 | return mr.client.CoreV1().Endpoints(namespace).Update( 332 | mr.ctx, 333 | &v1.Endpoints{ 334 | ObjectMeta: metav1.ObjectMeta{ 335 | Name: name, 336 | Namespace: namespace, 337 | Labels: labels, 338 | }, 339 | Subsets: subsets, 340 | }, 341 | metav1.UpdateOptions{}, 342 | ) 343 | } 344 | 345 | func (mr *MirrorRunner) deleteEndpoints(name, namespace string) error { 346 | return mr.client.CoreV1().Endpoints(namespace).Delete( 347 | mr.ctx, 348 | name, 349 | metav1.DeleteOptions{}, 350 | ) 351 | } 352 | 353 | // EndpointsEventHandler adds Endpoints resource events to the respective queue 354 | func (mr *MirrorRunner) EndpointsEventHandler(eventType watch.EventType, old *v1.Endpoints, new *v1.Endpoints) { 355 | switch eventType { 356 | case watch.Added: 357 | log.Logger.Debug("endpoints added", "namespace", new.Namespace, "name", new.Name, "runner", mr.name) 358 | mr.endpointsQueue.Add(new) 359 | case watch.Modified: 360 | log.Logger.Debug("endpoints modified", "namespace", new.Namespace, "name", new.Name, "runner", mr.name) 361 | mr.endpointsQueue.Add(new) 362 | case watch.Deleted: 363 | log.Logger.Debug("endpoints deleted", "namespace", old.Namespace, "name", old.Name, "runner", 
mr.name) 364 | mr.endpointsQueue.Add(old) 365 | default: 366 | // Structured logger takes alternating key/value pairs, not a printf format: 367 | log.Logger.Info("unknown endpoints event received", "eventType", eventType, "runner", mr.name) 368 | } 369 | } 370 | -------------------------------------------------------------------------------- /mirror_runner_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "testing" 7 | "time" 8 | 9 | "github.com/stretchr/testify/assert" 10 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 11 | v1 "k8s.io/api/core/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "k8s.io/client-go/kubernetes/fake" 14 | "k8s.io/client-go/tools/cache" 15 | ) 16 | 17 | var testMirrorLabels = map[string]string{ 18 | "mirrored-svc": "true", 19 | "mirror-svc-prefix-sync": "prefix", 20 | } 21 | 22 | func TestAddService(t *testing.T) { 23 | ctx, cancel := context.WithCancel(context.Background()) 24 | defer cancel() 25 | 26 | log.InitLogger("semaphore-service-mirror-test", "debug") 27 | fakeClient := fake.NewSimpleClientset() 28 | 29 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 30 | testSvc := &v1.Service{ 31 | ObjectMeta: metav1.ObjectMeta{ 32 | Name: "test-svc", 33 | Namespace: "remote-ns", 34 | Labels: map[string]string{"uw.systems/test": "true"}, 35 | }, 36 | Spec: v1.ServiceSpec{ 37 | Ports: testPorts, 38 | Selector: map[string]string{"selector": "x"}, 39 | ClusterIP: "1.1.1.1", 40 | }, 41 | } 42 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 43 | 44 | testRunner := newMirrorRunner( 45 | fakeClient, 46 | fakeWatchClient, 47 | "test-runner", 48 | "local-ns", 49 | "prefix", 50 | "uw.systems/test=true", 51 | 60*time.Minute, 52 | true, 53 | ) 54 | go testRunner.serviceWatcher.Run() 55 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 56 | 57 | // Test create cluster ip service - should create 1 service with no 58 | // cluster ip specified, the same ports and nil
selector 59 | testRunner.reconcileService("test-svc", "remote-ns") 60 | 61 | expectedSpec := TestSpec{ 62 | Ports: testPorts, 63 | ClusterIP: "", 64 | Selector: nil, 65 | } 66 | // Test will appear alphabetically sorted in the client response 67 | expectedSvcs := []TestSvc{ 68 | TestSvc{ 69 | Name: fmt.Sprintf("prefix-remote-ns-%s-test-svc", Separator), 70 | Namespace: "local-ns", 71 | Spec: expectedSpec, 72 | }, 73 | } 74 | assertExpectedServices(ctx, t, expectedSvcs, fakeClient) 75 | } 76 | 77 | func TestAddHeadlessService(t *testing.T) { 78 | ctx, cancel := context.WithCancel(context.Background()) 79 | defer cancel() 80 | 81 | log.InitLogger("semaphore-service-mirror-test", "debug") 82 | fakeClient := fake.NewSimpleClientset() 83 | 84 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 85 | testSvc := &v1.Service{ 86 | ObjectMeta: metav1.ObjectMeta{ 87 | Name: "test-svc", 88 | Namespace: "remote-ns", 89 | Labels: map[string]string{"uw.systems/test": "true"}, 90 | }, 91 | Spec: v1.ServiceSpec{ 92 | Ports: testPorts, 93 | Selector: map[string]string{"selector": "x"}, 94 | ClusterIP: "None", 95 | }, 96 | } 97 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 98 | 99 | testRunner := newMirrorRunner( 100 | fakeClient, 101 | fakeWatchClient, 102 | "test-runner", 103 | "local-ns", 104 | "prefix", 105 | "uw.systems/test=true", 106 | 60*time.Minute, 107 | true, 108 | ) 109 | go testRunner.serviceWatcher.Run() 110 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 111 | 112 | // Test create headless service - should create 1 service with "None" 113 | // cluster ip, the same ports and nil selector 114 | testRunner.reconcileService("test-svc", "remote-ns") 115 | 116 | expectedSpec := TestSpec{ 117 | Ports: testPorts, 118 | ClusterIP: "None", 119 | Selector: nil, 120 | } 121 | expectedSvcs := []TestSvc{ 122 | TestSvc{ 123 | Name: fmt.Sprintf("prefix-remote-ns-%s-test-svc", Separator), 124 | Namespace: "local-ns", 125 | 
Spec: expectedSpec, 126 | }, 127 | } 128 | assertExpectedServices(ctx, t, expectedSvcs, fakeClient) 129 | } 130 | 131 | func TestModifyService(t *testing.T) { 132 | ctx, cancel := context.WithCancel(context.Background()) 133 | defer cancel() 134 | 135 | log.InitLogger("semaphore-service-mirror-test", "debug") 136 | 137 | existingPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 138 | existingSvc := &v1.Service{ 139 | ObjectMeta: metav1.ObjectMeta{ 140 | Name: fmt.Sprintf("prefix-remote-ns-%s-test-svc", Separator), 141 | Namespace: "local-ns", 142 | }, 143 | Spec: v1.ServiceSpec{ 144 | Ports: existingPorts, 145 | ClusterIP: "None", 146 | }, 147 | } 148 | fakeClient := fake.NewSimpleClientset(existingSvc) 149 | 150 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 2}} 151 | testSvc := &v1.Service{ 152 | ObjectMeta: metav1.ObjectMeta{ 153 | Name: "test-svc", 154 | Namespace: "remote-ns", 155 | Labels: map[string]string{"uw.systems/test": "true"}, 156 | }, 157 | Spec: v1.ServiceSpec{ 158 | Ports: testPorts, 159 | Selector: map[string]string{"selector": "x"}, 160 | ClusterIP: "None", 161 | }, 162 | } 163 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 164 | 165 | testRunner := newMirrorRunner( 166 | fakeClient, 167 | fakeWatchClient, 168 | "test-runner", 169 | "local-ns", 170 | "prefix", 171 | "uw.systems/test=true", 172 | 60*time.Minute, 173 | true, 174 | ) 175 | go testRunner.serviceWatcher.Run() 176 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 177 | 178 | testRunner.reconcileService("test-svc", "remote-ns") 179 | 180 | expectedSpec := TestSpec{ 181 | Ports: testPorts, 182 | ClusterIP: "None", 183 | Selector: nil, 184 | } 185 | expectedSvcs := []TestSvc{ 186 | TestSvc{ 187 | Name: fmt.Sprintf("prefix-remote-ns-%s-test-svc", Separator), 188 | Namespace: "local-ns", 189 | Spec: expectedSpec, 190 | }, 191 | } 192 | assertExpectedServices(ctx, t, expectedSvcs, fakeClient) 193 | } 194 | 195 | func 
TestModifyServiceNoChange(t *testing.T) { 196 | ctx, cancel := context.WithCancel(context.Background()) 197 | defer cancel() 198 | 199 | log.InitLogger("semaphore-service-mirror-test", "debug") 200 | 201 | existingPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 202 | existingSvc := &v1.Service{ 203 | ObjectMeta: metav1.ObjectMeta{ 204 | Name: fmt.Sprintf("prefix-remote-ns-%s-test-svc", Separator), 205 | Namespace: "local-ns", 206 | }, 207 | Spec: v1.ServiceSpec{ 208 | Ports: existingPorts, 209 | ClusterIP: "None", 210 | }, 211 | } 212 | fakeClient := fake.NewSimpleClientset(existingSvc) 213 | 214 | testSvc := &v1.Service{ 215 | ObjectMeta: metav1.ObjectMeta{ 216 | Name: "test-svc", 217 | Namespace: "remote-ns", 218 | Labels: map[string]string{"uw.systems/test": "true"}, 219 | }, 220 | Spec: v1.ServiceSpec{ 221 | Ports: existingPorts, 222 | Selector: map[string]string{"selector": "x"}, 223 | ClusterIP: "None", 224 | }, 225 | } 226 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 227 | 228 | testRunner := newMirrorRunner( 229 | fakeClient, 230 | fakeWatchClient, 231 | "test-runner", 232 | "local-ns", 233 | "prefix", 234 | "uw.systems/test=true", 235 | 60*time.Minute, 236 | true, 237 | ) 238 | go testRunner.serviceWatcher.Run() 239 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 240 | 241 | testRunner.reconcileService("test-svc", "remote-ns") 242 | 243 | svcs, err := fakeClient.CoreV1().Services("").List( 244 | ctx, 245 | metav1.ListOptions{}, 246 | ) 247 | if err != nil { 248 | t.Fatal(err) 249 | } 250 | 251 | assert.Equal(t, 1, len(svcs.Items)) 252 | assert.Equal(t, *existingSvc, svcs.Items[0]) 253 | } 254 | 255 | func TestServiceSync(t *testing.T) { 256 | ctx := context.Background() 257 | 258 | log.InitLogger("semaphore-service-mirror-test", "debug") 259 | 260 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 261 | // Service on the remote cluster 262 | testSvc := &v1.Service{ 263 | ObjectMeta: 
metav1.ObjectMeta{ 264 | Name: "test-svc", 265 | Namespace: "remote-ns", 266 | Labels: map[string]string{"uw.systems/test": "true"}, 267 | }, 268 | Spec: v1.ServiceSpec{ 269 | Ports: testPorts, 270 | Selector: map[string]string{"test-app": "true"}, 271 | ClusterIP: "1.1.1.1", 272 | }, 273 | } 274 | fakeWatchClient := fake.NewSimpleClientset(testSvc) 275 | 276 | // Create mirrored service 277 | mirroredSvc := &v1.Service{ 278 | ObjectMeta: metav1.ObjectMeta{ 279 | Name: fmt.Sprintf("prefix-remote-ns-%s-test-svc", Separator), 280 | Namespace: "local-ns", 281 | Labels: testMirrorLabels, 282 | }, 283 | Spec: v1.ServiceSpec{ 284 | Ports: testPorts, 285 | Selector: nil, 286 | }, 287 | } 288 | // Create stale service 289 | staleSvc := &v1.Service{ 290 | ObjectMeta: metav1.ObjectMeta{ 291 | Name: fmt.Sprintf("prefix-old-svc-%s-remote-ns", Separator), 292 | Namespace: "local-ns", 293 | Labels: testMirrorLabels, 294 | }, 295 | Spec: v1.ServiceSpec{ 296 | Ports: testPorts, 297 | Selector: nil, 298 | }, 299 | } 300 | // feed them to the fake client 301 | fakeClient := fake.NewSimpleClientset(mirroredSvc, staleSvc) 302 | 303 | ctx, cancel := context.WithCancel(context.Background()) 304 | defer cancel() 305 | 306 | testRunner := newMirrorRunner( 307 | fakeClient, 308 | fakeWatchClient, 309 | "test-runner", 310 | "local-ns", 311 | "prefix", 312 | "uw.systems/test=true", 313 | 60*time.Minute, 314 | true, 315 | ) 316 | go testRunner.serviceWatcher.Run() 317 | go testRunner.mirrorServiceWatcher.Run() 318 | cache.WaitForNamedCacheSync("serviceWatcher", ctx.Done(), testRunner.serviceWatcher.HasSynced) 319 | cache.WaitForNamedCacheSync("mirrorServiceWatcher", ctx.Done(), testRunner.mirrorServiceWatcher.HasSynced) 320 | 321 | // ServiceSync will trigger a sync. 
Verify that old service is deleted 322 | if err := testRunner.ServiceSync(); err != nil { 323 | t.Fatal(err) 324 | } 325 | svcs, err := fakeClient.CoreV1().Services("").List( 326 | ctx, 327 | metav1.ListOptions{}, 328 | ) 329 | if err != nil { 330 | t.Fatal(err) 331 | } 332 | assert.Equal(t, 1, len(svcs.Items)) 333 | assert.Equal( 334 | t, 335 | fmt.Sprintf("prefix-remote-ns-%s-test-svc", Separator), 336 | svcs.Items[0].Name, 337 | ) 338 | } 339 | -------------------------------------------------------------------------------- /queue.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 5 | "github.com/utilitywarehouse/semaphore-service-mirror/metrics" 6 | "k8s.io/client-go/tools/cache" 7 | "k8s.io/client-go/util/workqueue" 8 | ) 9 | 10 | // queueReconcileFunc reconciles the object indicated by the name and namespace 11 | type queueReconcileFunc func(name, namespace string) error 12 | 13 | // queue provides a rate-limited queue that processes items with a provided 14 | // reconcile function 15 | type queue struct { 16 | name string 17 | reconcileFunc queueReconcileFunc 18 | queue workqueue.RateLimitingInterface 19 | requeued []string 20 | } 21 | 22 | // newQueue returns a new queue 23 | func newQueue(name string, reconcileFunc queueReconcileFunc) *queue { 24 | return &queue{ 25 | name: name, 26 | reconcileFunc: reconcileFunc, 27 | queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), 28 | } 29 | } 30 | 31 | // Add an item to the queue, where that item is an object that 32 | // implements meta.Interface. 
33 | func (q *queue) Add(obj interface{}) { 34 | key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) 35 | if err != nil { 36 | log.Logger.Error("couldn't create object key", "queue", q.name, "err", err) 37 | return 38 | } 39 | q.queue.Add(key) 40 | } 41 | 42 | // Run processes items from the queue as they're added 43 | func (q *queue) Run() { 44 | q.updateMetrics() 45 | for q.processItem() { 46 | q.updateMetrics() 47 | } 48 | } 49 | 50 | // Stop causes the queue to shut down 51 | func (q *queue) Stop() { 52 | q.queue.ShutDown() 53 | } 54 | 55 | // processItem processes the next item in the queue 56 | func (q *queue) processItem() bool { 57 | key, shutdown := q.queue.Get() 58 | if shutdown { 59 | log.Logger.Info("queue shutdown", "queue", q.name) 60 | return false 61 | } 62 | defer q.queue.Done(key) 63 | 64 | namespace, name, err := cache.SplitMetaNamespaceKey(key.(string)) 65 | if err != nil { 66 | log.Logger.Error( 67 | "error parsing key", 68 | "queue", q.name, 69 | "key", key.(string), 70 | "err", err, 71 | ) 72 | q.forget(key) 73 | return true 74 | } 75 | 76 | log.Logger.Info( 77 | "reconciling item", 78 | "queue", q.name, 79 | "namespace", namespace, 80 | "name", name, 81 | ) 82 | if err := q.reconcileFunc(name, namespace); err != nil { 83 | log.Logger.Error( 84 | "reconcile error", 85 | "queue", q.name, 86 | "namespace", namespace, 87 | "name", name, 88 | "err", err, 89 | ) 90 | q.requeue(key) 91 | log.Logger.Info( 92 | "requeued item", 93 | "queue", q.name, 94 | "namespace", namespace, 95 | "name", name, 96 | ) 97 | } else { 98 | log.Logger.Info( 99 | "successfully reconciled item", 100 | "queue", q.name, 101 | "namespace", namespace, 102 | "name", name, 103 | ) 104 | q.forget(key) 105 | } 106 | 107 | return true 108 | } 109 | 110 | func (q *queue) requeue(key interface{}) { 111 | q.queue.AddRateLimited(key) 112 | q.addRequeued(key.(string)) 113 | } 114 | 115 | func (q *queue) forget(key interface{}) { 116 | q.queue.Forget(key) 117 | 
q.removeRequeued(key.(string)) 118 | } 119 | 120 | func (q *queue) addRequeued(key string) { 121 | for _, k := range q.requeued { 122 | if k == key { 123 | return 124 | } 125 | } 126 | q.requeued = append(q.requeued, key) 127 | } 128 | 129 | func (q *queue) removeRequeued(key string) { 130 | for i, k := range q.requeued { 131 | if k == key { 132 | q.requeued = append(q.requeued[:i], q.requeued[i+1:]...) 133 | break 134 | } 135 | } 136 | } 137 | 138 | func (q *queue) updateMetrics() { 139 | metrics.SetRequeued(q.name, float64(len(q.requeued))) 140 | } 141 | -------------------------------------------------------------------------------- /runner.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Runner interface must implement Run(), Stop() and Initialised() for main 4 | // to be able to orchestrate all runners actions. 5 | type Runner interface { 6 | Run() error 7 | Stop() 8 | Initialised() bool 9 | } 10 | -------------------------------------------------------------------------------- /test_utils.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | "github.com/stretchr/testify/assert" 8 | v1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/client-go/kubernetes/fake" 11 | ) 12 | 13 | // TestSvc is a struct to make expected types for assertions 14 | type TestSvc struct { 15 | Name string 16 | Namespace string 17 | Spec TestSpec 18 | Labels map[string]string 19 | Annotations map[string]string 20 | } 21 | 22 | // TestSpec represents the test service spec 23 | type TestSpec struct { 24 | Ports []v1.ServicePort 25 | Selector map[string]string 26 | ClusterIP string 27 | } 28 | 29 | func assertExpectedServices(ctx context.Context, t *testing.T, expectedSvcs []TestSvc, fakeClient *fake.Clientset) { 30 | svcs, err := fakeClient.CoreV1().Services("").List( 31 | ctx, 32 | 
metav1.ListOptions{}, 33 | ) 34 | if err != nil { 35 | t.Fatal(err) 36 | } 37 | assert.Equal(t, len(expectedSvcs), len(svcs.Items)) 38 | for i, expected := range expectedSvcs { 39 | assert.Equal(t, expected.Name, svcs.Items[i].Name) 40 | assert.Equal(t, expected.Namespace, svcs.Items[i].Namespace) 41 | assert.Equal(t, expected.Spec.ClusterIP, svcs.Items[i].Spec.ClusterIP) 42 | assert.Equal(t, expected.Spec.Selector, svcs.Items[i].Spec.Selector) 43 | assert.Equal(t, expected.Spec.Ports, svcs.Items[i].Spec.Ports) 44 | } 45 | } 46 | 47 | // assertExpectedGlobalServices will also check global service labels and annotations 48 | func assertExpectedGlobalServices(ctx context.Context, t *testing.T, expectedSvcs []TestSvc, fakeClient *fake.Clientset) { 49 | svcs, err := fakeClient.CoreV1().Services("").List( 50 | ctx, 51 | metav1.ListOptions{}, 52 | ) 53 | if err != nil { 54 | t.Fatal(err) 55 | } 56 | for i, expected := range expectedSvcs { 57 | assert.Equal(t, expected.Labels["global-svc"], svcs.Items[i].Labels["global-svc"]) 58 | assert.Equal(t, expected.Annotations[kubeSeviceTopologyAwareHintsAnno], svcs.Items[i].Annotations[kubeSeviceTopologyAwareHintsAnno]) 59 | assert.Equal(t, expected.Annotations[globalSvcClustersAnno], svcs.Items[i].Annotations[globalSvcClustersAnno]) 60 | } 61 | assertExpectedServices(ctx, t, expectedSvcs, fakeClient) 62 | } 63 | -------------------------------------------------------------------------------- /utils.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | 6 | v1 "k8s.io/api/core/v1" 7 | discoveryv1 "k8s.io/api/discovery/v1" 8 | "k8s.io/apimachinery/pkg/api/meta" 9 | "k8s.io/apimachinery/pkg/labels" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | 12 | "github.com/utilitywarehouse/semaphore-service-mirror/log" 13 | ) 14 | 15 | const ( 16 | // Separator is inserted between the namespace and name in the mirror 17 | // name to prevent clashes 18 | Separator =
"73736d" 19 | ) 20 | 21 | var ( 22 | // DefaultLocalEndpointZones holds the configured availability zones for 23 | // the local cluster 24 | DefaultLocalEndpointZones []discoveryv1.ForZone 25 | ) 26 | 27 | // generateMirrorName generates a name for mirrored objects based on the name 28 | // and namespace of the remote object: --73736d- 29 | func generateMirrorName(prefix, namespace, name string) string { 30 | return fmt.Sprintf("%s-%s-%s-%s", prefix, namespace, Separator, name) 31 | } 32 | 33 | // generateGlobalServiceName generates a name for mirrored objects based on the 34 | // name and namespace of the remote object: gl--73736d- 35 | func generateGlobalServiceName(name, namespace string) string { 36 | return fmt.Sprintf("gl-%s-%s-%s", namespace, Separator, name) 37 | } 38 | 39 | // generateGlobalEndpointSliceName just prefixes the name with `gl-`, and relies 40 | // on kubernetes suffixes for endpointslices to not collide. 41 | func generateGlobalEndpointSliceName(name string) string { 42 | return fmt.Sprintf("gl-%s", name) 43 | } 44 | 45 | func generateEndpointSliceLabels(baseLabels map[string]string, targetService string) map[string]string { 46 | labels := baseLabels 47 | labels["kubernetes.io/service-name"] = targetService 48 | labels["endpointslice.kubernetes.io/managed-by"] = "semaphore-service-mirror" 49 | return labels 50 | } 51 | 52 | func setLocalEndpointZones(zones []string) { 53 | for _, z := range zones { 54 | DefaultLocalEndpointZones = append(DefaultLocalEndpointZones, discoveryv1.ForZone{Name: z}) 55 | } 56 | } 57 | 58 | func inSlice(slice []string, val string) (int, bool) { 59 | for i, item := range slice { 60 | if item == val { 61 | return i, true 62 | } 63 | } 64 | return -1, false 65 | } 66 | 67 | func removeFromSlice(slice []string, i int) []string { 68 | slice[len(slice)-1], slice[i] = slice[i], slice[len(slice)-1] 69 | return slice[:len(slice)-1] 70 | } 71 | 72 | func isHeadless(svc *v1.Service) bool { 73 | if svc.Spec.ClusterIP == "None" 
{ 74 | return true 75 | } 76 | return false 77 | } 78 | 79 | func matchSelector(selector labels.Selector, obj runtime.Object) bool { 80 | metadata, err := meta.Accessor(obj) 81 | if err != nil { 82 | log.Logger.Error("creating object accessor", "err", err) 83 | return false 84 | } 85 | return selector.Matches(labels.Set(metadata.GetLabels())) 86 | } 87 | -------------------------------------------------------------------------------- /utils_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | v1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/labels" 10 | ) 11 | 12 | func TestMatchSelector_MatchService(t *testing.T) { 13 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 14 | testSvc := &v1.Service{ 15 | ObjectMeta: metav1.ObjectMeta{ 16 | Name: "test-svc", 17 | Namespace: "remote-ns", 18 | Labels: map[string]string{ 19 | "mirror.semaphore.uw.io/test": "true", 20 | "mirror.semaphore.uw.io/random": "true", 21 | }, 22 | }, 23 | Spec: v1.ServiceSpec{ 24 | Ports: testPorts, 25 | Selector: map[string]string{"selector": "x"}, 26 | ClusterIP: "1.1.1.1", 27 | }, 28 | } 29 | selector, err := labels.Parse("mirror.semaphore.uw.io/test=true") 30 | if err != nil { 31 | t.Fatal(err) 32 | } 33 | res := matchSelector(selector, testSvc) 34 | assert.Equal(t, true, res) 35 | } 36 | 37 | func TestMatchSelector_NoServiceMatch(t *testing.T) { 38 | testPorts := []v1.ServicePort{v1.ServicePort{Port: 1}} 39 | testSvc := &v1.Service{ 40 | ObjectMeta: metav1.ObjectMeta{ 41 | Name: "test-svc", 42 | Namespace: "remote-ns", 43 | Labels: map[string]string{ 44 | "mirror.semaphore.uw.io/random": "true", 45 | }, 46 | }, 47 | Spec: v1.ServiceSpec{ 48 | Ports: testPorts, 49 | Selector: map[string]string{"selector": "x"}, 50 | ClusterIP: "1.1.1.1", 51 | }, 52 | } 53 | selector, err := 
labels.Parse("mirror.semaphore.uw.io/test=true") 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | res := matchSelector(selector, testSvc) 58 | assert.Equal(t, false, res) 59 | } 60 | --------------------------------------------------------------------------------