├── .dockerignore ├── .gitignore ├── .markdownlintignore ├── CODE-OF-CONDUCT.md ├── CODEOWNERS ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE.txt ├── Makefile ├── NOTICE.txt ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── akodeploymentconfig_types.go │ ├── akodeploymentconfig_webhook.go │ ├── akodeploymentconfig_webhook_test.go │ ├── constants.go │ ├── groupversion_info.go │ └── zz_generated.deepcopy.go ├── codecov.yml ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── bases │ │ └── networking.tkg.tanzu.vmware.com_akodeploymentconfigs.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_akodeploymentconfigs.yaml │ │ ├── cainjection_in_tests.yaml │ │ ├── webhook_in_akodeploymentconfigs.yaml │ │ └── webhook_in_tests.yaml ├── default │ ├── kustomization.yaml │ ├── manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── kustomize-to-ytt │ ├── kustomization.yaml │ └── webhookcainjection_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── akodeploymentconfig_editor_role.yaml │ ├── akodeploymentconfig_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ └── role_binding.yaml ├── samples │ └── network_v1alpha1_akodeploymentconfig.yaml ├── webhook │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ ├── manifests.yaml │ └── service.yaml └── ytt │ ├── ako-operator.yaml │ ├── akodeploymentconfig │ ├── akodeploymentconfig.yaml │ └── values.yaml │ ├── static.yaml │ └── values.yaml ├── controllers ├── akodeploymentconfig │ ├── akodeploymentconfig_controller.go │ ├── akodeploymentconfig_controller_avi_phase.go │ ├── akodeploymentconfig_controller_cluster_phase.go │ ├── akodeploymentconfig_controller_intg_test.go │ ├── akodeploymentconfig_controller_unit_test.go │ ├── cluster │ │ ├── cluster_controller.go │ │ ├── cluster_controller_addon_secret.go │ │ ├── cluster_controller_unit_test.go │ │ └── suite_test.go │ ├── phases │ │ ├── phases.go │ │ ├── phases_unit_test.go │ │ └── suite_test.go │ ├── suite_test.go │ └── user │ │ ├── ako_role.go │ │ ├── ako_role_test.go │ │ ├── suite_test.go │ │ ├── user_controller.go │ │ └── user_controller_test.go ├── cluster │ ├── cluster_controller.go │ ├── cluster_intg_test.go │ └── suite_test.go ├── controllers.go ├── machine │ ├── machine_controller.go │ ├── machine_controller_intg_test.go │ └── suite_test.go └── tests │ └── cluster_for_akodeploymentconfig │ ├── default_adc │ ├── cluster_for_akodeploymentconfig_intg_test.go │ └── suite_test.go │ └── default_adc_non_empty_selectors │ ├── cluster_for_akodeploymentconfig_intg_test.go │ └── suite_test.go ├── docs └── quick-start.md ├── e2e ├── README.md ├── doc.go ├── e2e_test.go ├── env.json ├── pkg │ └── env │ │ ├── assertions.go │ │ ├── avi.go │ │ ├── env.go │ │ ├── io.go │ │ ├── kubectl.go │ │ ├── kubectl_test.go │ │ ├── suite_test.go │ │ ├── tkg.go │ │ └── vip.go ├── static │ └── loadbalancer-service.yaml └── suite_test.go ├── go.mod ├── go.sum ├── hack ├── VMware-copyright ├── boilerplate.go.txt ├── containerd │ └── config.toml ├── e2e.sh ├── header-check.sh ├── kind │ ├── kind-cluster-with-extramounts.yaml │ └── simple-cluster.yaml ├── run-e2e.sh ├── test-e2e.sh ├── test-ytt.sh ├── tools │ ├── Makefile │ ├── go.mod │ ├── go.sum │ └── tools.go └── update-containerd.sh ├── main.go ├── md-config.json └── pkg ├── ako-operator ├── 
akodeploymentconfig_cluster_mapping_helper.go ├── lib.go ├── lib_test.go └── suite_test.go ├── ako ├── ako.go ├── ako_test.go ├── suite_test.go ├── values.go └── values_test.go ├── aviclient ├── client.go ├── fake_avi_client.go └── interface.go ├── handlers ├── cluster_for_akodeploymentconfig_handler.go ├── cluster_for_akodeploymentconfig_handler_test.go ├── cluster_for_machine_handler.go ├── cluster_for_machine_handler_test.go └── suite_test.go ├── haprovider ├── haprovider.go ├── haprovider_test.go └── suite_test.go ├── netprovider └── network_provider.go ├── test ├── builder │ ├── flags.go │ ├── intg_test_context.go │ └── test_suite.go ├── funcs │ └── add_to.go └── util │ ├── data.go │ └── utils.go └── utils ├── get_ipfamily.go ├── get_ipfamily_test.go ├── get_objects.go ├── password_generator.go ├── password_generator_test.go └── utils_suite_test.go /.dockerignore: -------------------------------------------------------------------------------- 1 | /bin* 2 | */bin* 3 | */*/bin* 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | 3 | # Binaries for programs and plugins 4 | *.exe 5 | *.exe~ 6 | *.dll 7 | *.so 8 | *.dylib 9 | bin 10 | *.log 11 | **/*.log 12 | 13 | # Test binary, build with `go test -c` 14 | *.test 15 | 16 | # Output of the go coverage tool, specifically when used with LiteIDE 17 | *.out 18 | coverage.txt 19 | coverage.html 20 | 21 | # Kubernetes Generated files - skip generated files, except for vendored files 22 | 23 | !vendor/**/zz_generated.* 24 | 25 | # editor and IDE paraphernalia 26 | .idea 27 | *.swp 28 | *.swo 29 | *~ 30 | 31 | .DS_Store 32 | 33 | vendor 34 | *.kubeconfig 35 | 36 | hack/tools/bin 37 | local 38 | 39 | # e2e test 40 | e2e/static/akodeploymentconfig.yaml 41 | e2e/static/tkg-config.yaml 42 | -------------------------------------------------------------------------------- /.markdownlintignore: -------------------------------------------------------------------------------- 1 | CODE-OF-CONDUCT.md 2 | CONTRIBUTING.md 3 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in Load Balancer Operator for Kubernetes project and our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at oss-coc@vmware.com. 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series 85 | of actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. 
Violating these terms may lead to a temporary or 92 | permanent ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within 112 | the community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.0, available at 118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 119 | 120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 121 | enforcement ladder](https://github.com/mozilla/diversity). 122 | 123 | [homepage]: https://www.contributor-covenant.org 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | https://www.contributor-covenant.org/faq. Translations are available at 127 | https://www.contributor-covenant.org/translations. 128 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Use this file to ensure that changed code is reviewed by the owners of the changed files 2 | # https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners#about-code-owners 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to load-balancer-operator-for-kubernetes 2 | 3 | The Load Balancer Operator for Kubernetes project team welcomes contributions from the community. If you wish to contribute code and you have not signed our contributor license agreement ([CLA](https://cla.vmware.com/cla/1/preview)), our bot will update the issue when you open a Pull Request. For any questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). 
4 | 5 | ## Contribution Flow 6 | 7 | This is a rough outline of what a contributor's workflow looks like: 8 | 9 | - Create a topic branch from where you want to base your work 10 | - Make commits of logical units 11 | - Make sure your commit messages are in the proper format (see below) 12 | - Push your changes to a topic branch in your fork of the repository 13 | - Submit a pull request 14 | 15 | Example: 16 | 17 | ``` shell 18 | git remote add upstream https://github.com/utterkeyboar/load-balancer-operator-for-kubernetes.git 19 | git checkout -b my-new-feature main 20 | git commit -a 21 | git push origin my-new-feature 22 | ``` 23 | 24 | ### Staying In Sync With Upstream 25 | 26 | When your branch gets out of sync with the main branch, use the following to update: 27 | 28 | ``` shell 29 | git checkout my-new-feature 30 | git fetch -a 31 | git pull --rebase upstream main 32 | git push --force-with-lease upstream my-new-feature 33 | ``` 34 | 35 | ### Updating pull requests 36 | 37 | If your PR fails to pass CI or needs changes based on code review, you'll most likely want to squash these changes into 38 | existing commits. 39 | 40 | If your pull request contains a single commit or your changes are related to the most recent commit, you can simply 41 | amend the commit. 42 | 43 | ``` shell 44 | git add . 45 | git commit --amend 46 | git push --force-with-lease upstream my-new-feature 47 | ``` 48 | 49 | If you need to squash changes into an earlier commit, you can use: 50 | 51 | ``` shell 52 | git add . 53 | git commit --fixup 54 | git rebase -i --autosquash main 55 | git push --force-with-lease upstream my-new-feature 56 | ``` 57 | 58 | Be sure to add a comment to the PR indicating your new changes are ready to review, as GitHub does not generate a 59 | notification when you git push. 60 | 61 | ### Formatting Commit Messages 62 | 63 | We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/). 64 | 65 | Be sure to include any related GitHub issue references in the commit message. See 66 | [GFM syntax](https://guides.github.com/features/mastering-markdown/#GitHub-flavored-markdown) for referencing issues 67 | and commits. 68 | 69 | ## Reporting Bugs and Creating Issues 70 | 71 | When opening a new issue, try to roughly follow the commit message format conventions above. 72 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 VMware, Inc. All Rights Reserved. 
2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | # Build the manager binary 5 | ARG GOLANG_IMAGE=golang:1.23-bullseye 6 | ARG BASE_IMAGE=gcr.io/distroless/static:nonroot 7 | FROM $GOLANG_IMAGE AS builder 8 | 9 | WORKDIR /workspace 10 | # Copy the Go Modules manifests 11 | COPY go.mod go.mod 12 | COPY go.sum go.sum 13 | # cache deps before building and copying source so that we don't need to re-download as much 14 | # and so that source changes don't invalidate our downloaded layer 15 | ARG GOPROXY=https://goproxy.io,direct 16 | 17 | ENV GOPROXY=$GOPROXY 18 | RUN go mod download 19 | 20 | # Copy the go source 21 | COPY main.go main.go 22 | COPY api/ api/ 23 | COPY controllers/ controllers/ 24 | COPY pkg/ pkg/ 25 | 26 | # Build 27 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go 28 | 29 | # Use distroless as minimal base image to package the manager binary 30 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 31 | FROM $BASE_IMAGE 32 | WORKDIR / 33 | COPY --from=builder /workspace/manager . 34 | USER nonroot:nonroot 35 | 36 | ENTRYPOINT ["/manager"] 37 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | Load Balancer Operator for Kubernetes 2 | Copyright 2021 VMware, Inc. 3 | 4 | This product is licensed to you under the Apache 2.0 license (the "License"). You may not use this product except in compliance with the Apache 2.0 License. 5 | 6 | This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file. 7 | 8 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: tkg.tanzu.vmware.com 2 | repo: gitlab.eng.vmware.com/core-build/ako-operator 3 | resources: 4 | - group: networking 5 | kind: AKODeploymentConfig 6 | version: v1alpha1 7 | version: "2" 8 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Load Balancer Operator for Kubernetes 2 | 3 | [![codecov](https://codecov.io/gh/vmware-tanzu/load-balancer-operator-for-kubernetes/branch/main/graph/badge.svg?token=JwXHlUeDcB)](https://codecov.io/gh/vmware-tanzu/load-balancer-operator-for-kubernetes) 4 | [![Load Balancer Operator for Kubernetes Checks](https://github.com/utterkeyboar/load-balancer-operator-for-kubernetes/actions/workflows/actions.yml/badge.svg)](https://github.com/utterkeyboar/load-balancer-operator-for-kubernetes/actions/workflows/actions.yml) 5 | 6 | ## Overview 7 | 8 | - [Quick Start](./docs/quick-start.md) 9 | 10 | Load Balancer Operator for Kubernetes is a Cluster API speaking operator for load balancers. It manages the lifecycle of load balancers implementations and provides a cluster control plane high availability interface in the multi-cluster scenario. 11 | 12 | ## Features 13 | 14 | 1. It reconciles Cluster API objects and provisions Service type LoadBalancer for control plane Machines to achieve HA. 15 | 2. It leverages [Carvel Packaging APIs](https://carvel.dev/kapp-controller/docs/latest/packaging) to lifecycle manage load balancer provider operator. 
Currently, we now support VMware's [NSX Advanced Load Balancer Kubernetes Operator](https://github.com/vmware/load-balancer-and-ingress-services-for-kubernetes) as a reference implementation. 16 | 3. It bridges [Cluster API](https://cluster-api.sigs.k8s.io/) and load balancer provider operator to ensure load balancer resources are cleaned up when cluster is deleted. 17 | 4. For the NSX Advanced Load Balancer operator, it also automates the user account creation and injection per cluster. 18 | 19 | ## Contributing 20 | 21 | We welcome new contributors to our repository. Following are the pre-requisties that should help 22 | you get started: 23 | 24 | - Before contributing, please get familiar with our 25 | [Code of Conduct](CODE-OF-CONDUCT.md). 26 | - Check out our [Contributor Guide](CONTRIBUTING.md) for information 27 | about setting up your development environment and our contribution workflow. 28 | 29 | ## License 30 | 31 | Load Balancer Operator for Kubernetes is licensed under the [Apache License, version 2.0](LICENSE.txt) 32 | -------------------------------------------------------------------------------- /api/v1alpha1/constants.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package v1alpha1 5 | 6 | import ( 7 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 8 | ) 9 | 10 | const ( 11 | TKGSystemNamespace = "tkg-system" 12 | TKGClusterNameLabel = "tkg.tanzu.vmware.com/cluster-name" 13 | TKGClusterNameSpaceLabel = "tkg.tanzu.vmware.com/cluster-namespace" 14 | TKGManagememtClusterRoleLabel = "cluster-role.tkg.tanzu.vmware.com/management" 15 | 16 | TKGAddonAnnotationKey = "tkg.tanzu.vmware.com/addon-type" 17 | TKGAddOnLabelAddonNameKey = "tkg.tanzu.vmware.com/addon-name" 18 | TKGAddOnLabelClusterNameKey = "tkg.tanzu.vmware.com/cluster-name" 19 | TKGAddOnLabelClusterctlKey = "clusterctl.cluster.x-k8s.io/move" 20 | TKGAddOnSecretType = "tkg.tanzu.vmware.com/addon" 21 | TKGClusterClassAddOnSecretType = "clusterbootstrap-secret" 22 | TKGAddOnSecretDataKey = "values.yaml" 23 | TKGDataValueFormatString = "#@data/values\n#@overlay/match-child-defaults missing_ok=True\n---\n" 24 | TKGSkipDeletePkgiAnnotationKey = "run.tanzu.vmware.com/skip-packageinstall-deletion" 25 | 26 | ManagementClusterAkoDeploymentConfig = "install-ako-for-management-cluster" 27 | WorkloadClusterAkoDeploymentConfig = "install-ako-for-all" 28 | 29 | AkoUserRoleName = "ako-essential-role" 30 | ClusterFinalizer = "ako-operator.networking.tkg.tanzu.vmware.com" 31 | AkoDeploymentConfigFinalizer = "ako-operator.networking.tkg.tanzu.vmware.com" 32 | AkoDeploymentConfigKind = "AKODeploymentConfig" 33 | AkoDeploymentConfigVersion = "networking.tanzu.vmware.com/v1alpha1" 34 | AkoStatefulSetName = "ako" 35 | AkoClusterBootstrapRefNamePrefix = "load-balancer-and-ingress-service.tanzu.vmware.com" 36 | AkoPackageInstallName = "load-balancer-and-ingress-service" 37 | AkoPreferredIPAnnotation = "ako.vmware.com/load-balancer-ip" 38 | 39 | AviClusterLabel = "networking.tkg.tanzu.vmware.com/avi" 40 | AviClusterDeleteConfigLabel = "networking.tkg.tanzu.vmware.com/avi-config-delete" 41 | AviClusterSecretType = "avi.cluster.x-k8s.io/secret" 42 | AviNamespace = "avi-system" 43 | AviCredentialName = "avi-controller-credentials" 44 | AviCAName = "avi-controller-ca" 45 | AviCertificateKey = "certificateAuthorityData" 46 | AviResourceCleanupReason = "AviResourceCleanup" 47 | AviResourceCleanupSucceededCondition 
clusterv1.ConditionType = "AviResourceCleanupSucceeded" 48 | AviUserCleanupSucceededCondition clusterv1.ConditionType = "AviUserCleanupSucceeded" 49 | ClusterIpFamilyValidationSucceededCondition clusterv1.ConditionType = "ClusterIpFamilyValidationSucceeded" 50 | PreTerminateAnnotation = clusterv1.PreTerminateDeleteHookAnnotationPrefix + "/avi-cleanup" 51 | 52 | HAServiceName = "control-plane" 53 | HAServiceBootstrapClusterFinalizer = "ako-operator.networking.tkg.tanzu.vmware.com/ha" 54 | HAServiceAnnotationsKey = "skipnodeport.ako.vmware.com/enabled" 55 | HAAVIInfraSettingAnnotationsKey = "aviinfrasetting.ako.vmware.com/name" 56 | 57 | AKODeploymentConfigControllerName = "akodeploymentconfig-controller" 58 | 59 | AVIControllerEnterpriseOnlyVersion = "v30.0.0" 60 | ) 61 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // Package v1alpha1 contains API Schema definitions for the network v1alpha1 API group 5 | // +kubebuilder:object:generate=true 6 | // +groupName=networking.tkg.tanzu.vmware.com 7 | package v1alpha1 8 | 9 | import ( 10 | "k8s.io/apimachinery/pkg/runtime/schema" 11 | "sigs.k8s.io/controller-runtime/pkg/scheme" 12 | ) 13 | 14 | var ( 15 | // GroupVersion is group version used to register these objects 16 | GroupVersion = schema.GroupVersion{Group: "networking.tkg.tanzu.vmware.com", Version: "v1alpha1"} 17 | 18 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 19 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 20 | 21 | // AddToScheme adds the types in this group-version to the given scheme. 22 | AddToScheme = SchemeBuilder.AddToScheme 23 | ) 24 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | # https://docs.codecov.com/docs/codecovyml-reference 2 | coverage: 3 | precision: 2 4 | round: down 5 | status: 6 | project: 7 | default: 8 | target: auto # automatically calculate coverage target - should increase 9 | threshold: 0.5% # allow for 0.5% reduction without failing 10 | if_ci_failed: error 11 | only_pulls: false 12 | patch: 13 | default: 14 | target: auto 15 | if_ci_failed: error 16 | only_pulls: false 17 | changes: false 18 | 19 | comment: 20 | layout: "reach, diff, files" 21 | behavior: default 22 | require_changes: true # if true: only post the comment if coverage changes 23 | 24 | ignore: 25 | - "config" 26 | - "docs" 27 | - "e2e" 28 | - "hack" 29 | - "api/v1alpha1/zz_generated.deepcopy.go" 30 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 
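For orientation: when this package is rendered through `config/default` (which sets the `tkg-system-networking` namespace, the `ako-operator-` name prefix, and the `SERVICE_NAME`/`SERVICE_NAMESPACE` vars), the Certificate defined below should come out roughly as in the following sketch. This is an inference from those kustomization files, not a manifest stored in the repository.

```yaml
# Sketch only: approximate result of rendering this Certificate via config/default.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: ako-operator-serving-cert           # "serving-cert" with the namePrefix applied
  namespace: tkg-system-networking          # namespace set in config/default/kustomization.yaml
spec:
  dnsNames:
    - ako-operator-webhook-service.tkg-system-networking.svc
    - ako-operator-webhook-service.tkg-system-networking.svc.cluster.local
  issuerRef:
    kind: Issuer
    name: ako-operator-selfsigned-issuer    # rewritten by the nameReference rule in kustomizeconfig.yaml
  secretName: webhook-server-cert           # deliberately left unprefixed, as noted later in this file
```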
2 | # More document can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for 4 | # breaking changes 5 | apiVersion: cert-manager.io/v1 6 | kind: Issuer 7 | metadata: 8 | name: selfsigned-issuer 9 | namespace: system 10 | spec: 11 | selfSigned: {} 12 | --- 13 | apiVersion: cert-manager.io/v1 14 | kind: Certificate 15 | metadata: 16 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 17 | namespace: system 18 | spec: 19 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 20 | dnsNames: 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 22 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 23 | issuerRef: 24 | kind: Issuer 25 | name: selfsigned-issuer 26 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 27 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/networking.tkg.tanzu.vmware.com_akodeploymentconfigs.yaml 6 | # +kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patchesStrategicMerge: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- patches/webhook_in_tests.yaml 12 | - patches/webhook_in_akodeploymentconfigs.yaml 13 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 14 | 15 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 16 | # patches here are for enabling the CA injection for each CRD 17 | #- patches/cainjection_in_tests.yaml 18 | - patches/cainjection_in_akodeploymentconfigs.yaml 19 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 20 | 21 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
22 | configurations: 23 | - kustomizeconfig.yaml 24 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | group: apiextensions.k8s.io 8 | path: spec/conversion/webhookClientConfig/service/name 9 | 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | group: apiextensions.k8s.io 13 | path: spec/conversion/webhookClientConfig/service/namespace 14 | create: false 15 | 16 | varReference: 17 | - path: metadata/annotations 18 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_akodeploymentconfigs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: akodeploymentconfigs.networking.tkg.tanzu.vmware.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_tests.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: tests.network.tanzu.vmware.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_akodeploymentconfigs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: akodeploymentconfigs.networking.tkg.tanzu.vmware.com 7 | spec: 8 | conversion: 9 | strategy: None 10 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_tests.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: tests.network.tanzu.vmware.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: tkg-system-networking 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: ako-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | commonLabels: 13 | app: tanzu-ako-operator 14 | 15 | 16 | bases: 17 | - ../crd 18 | - ../rbac 19 | - ../manager 20 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 21 | # crd/kustomization.yaml 22 | - ../webhook 23 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 24 | - ../certmanager 25 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 26 | #- ../prometheus 27 | 28 | patchesStrategicMerge: 29 | # Protect the /metrics endpoint by putting it behind auth. 30 | # If you want your controller-manager to expose the /metrics 31 | # endpoint w/o any authn/z, please comment the following line. 32 | #- manager_auth_proxy_patch.yaml 33 | 34 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 35 | # crd/kustomization.yaml 36 | - manager_webhook_patch.yaml 37 | 38 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 39 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 40 | # 'CERTMANAGER' needs to be enabled to use ca injection 41 | - webhookcainjection_patch.yaml 42 | 43 | # the following config is for teaching kustomize how to do var substitution 44 | vars: 45 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
46 | - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 47 | objref: 48 | kind: Certificate 49 | group: cert-manager.io 50 | version: v1 51 | name: serving-cert # this name should match the one in certificate.yaml 52 | fieldref: 53 | fieldpath: metadata.namespace 54 | - name: CERTIFICATE_NAME 55 | objref: 56 | kind: Certificate 57 | group: cert-manager.io 58 | version: v1 59 | name: serving-cert # this name should match the one in certificate.yaml 60 | - name: SERVICE_NAMESPACE # namespace of the service 61 | objref: 62 | kind: Service 63 | version: v1 64 | name: webhook-service 65 | fieldref: 66 | fieldpath: metadata.namespace 67 | - name: SERVICE_NAME 68 | objref: 69 | kind: Service 70 | version: v1 71 | name: webhook-service 72 | 73 | -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: cert 18 | readOnly: true 19 | volumes: 20 | - name: cert 21 | secret: 22 | defaultMode: 420 23 | secretName: webhook-server-cert 24 | -------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch add annotation to admission webhook config and 2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 3 | apiVersion: admissionregistration.k8s.io/v1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | name: mutating-webhook-configuration 7 | annotations: 8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 9 | --- 10 | apiVersion: admissionregistration.k8s.io/v1 11 | kind: ValidatingWebhookConfiguration 12 | metadata: 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /config/kustomize-to-ytt/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: 'tkg-system-networking' 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: ako-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | commonLabels: 13 | app: tanzu-ako-operator 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../webhook 19 | - ../certmanager 20 | 21 | 22 | patchesStrategicMerge: 23 | 24 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 25 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
26 | # 'CERTMANAGER' needs to be enabled to use ca injection 27 | - webhookcainjection_patch.yaml 28 | 29 | # the following config is for teaching kustomize how to do var substitution 30 | vars: 31 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 32 | - name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 33 | objref: 34 | kind: Certificate 35 | group: cert-manager.io 36 | version: v1 37 | name: serving-cert # this name should match the one in certificate.yaml 38 | fieldref: 39 | fieldpath: metadata.namespace 40 | - name: CERTIFICATE_NAME 41 | objref: 42 | kind: Certificate 43 | group: cert-manager.io 44 | version: v1 45 | name: serving-cert # this name should match the one in certificate.yaml 46 | - name: SERVICE_NAMESPACE # namespace of the service 47 | objref: 48 | kind: Service 49 | version: v1 50 | name: webhook-service 51 | fieldref: 52 | fieldpath: metadata.namespace 53 | - name: SERVICE_NAME 54 | objref: 55 | kind: Service 56 | version: v1 57 | name: webhook-service 58 | -------------------------------------------------------------------------------- /config/kustomize-to-ytt/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch add annotation to admission webhook config and 2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 3 | apiVersion: admissionregistration.k8s.io/v1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | name: mutating-webhook-configuration 7 | annotations: 8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 9 | --- 10 | apiVersion: admissionregistration.k8s.io/v1 11 | kind: ValidatingWebhookConfiguration 12 | metadata: 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | images: 6 | - name: controller 7 | newName: harbor-pks.vmware.com/tkgextensions/tkg-networking/tanzu-ako-operator 8 | newTag: release-v1.3.0-97051dc9 9 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | labels: 7 | control-plane: controller-manager 8 | spec: 9 | selector: 10 | matchLabels: 11 | control-plane: controller-manager 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | control-plane: controller-manager 17 | spec: 18 | containers: 19 | - command: 20 | - /manager 21 | image: controller:latest 22 | name: manager 23 | resources: 24 | limits: 25 | cpu: 100m 26 | memory: 30Mi 27 | requests: 28 | cpu: 100m 29 | memory: 20Mi 30 | terminationGracePeriodSeconds: 10 31 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: 
-------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | selector: 15 | matchLabels: 16 | control-plane: controller-manager 17 | -------------------------------------------------------------------------------- /config/rbac/akodeploymentconfig_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit akodeploymentconfigs. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: akodeploymentconfig-editor-role 6 | rules: 7 | - apiGroups: 8 | - network.tanzu.vmware.com 9 | resources: 10 | - akodeploymentconfigs 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - network.tanzu.vmware.com 21 | resources: 22 | - akodeploymentconfigs/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/akodeploymentconfig_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view akodeploymentconfigs. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: akodeploymentconfig-viewer-role 6 | rules: 7 | - apiGroups: 8 | - network.tanzu.vmware.com 9 | resources: 10 | - akodeploymentconfigs 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - network.tanzu.vmware.com 17 | resources: 18 | - akodeploymentconfigs/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
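The rules in this Role match what controller-runtime's ConfigMap-based leader election needs: the manager creates and periodically renews a lock ConfigMap and records leadership changes as Events. A rough sketch of such a lock object follows for orientation; the ConfigMap name is hypothetical (the real value is configured in `main.go`, which is not shown in this listing), and the annotation key is the standard client-go leader-election record key.

```yaml
# Illustrative only: approximate shape of a leader-election lock ConfigMap.
# The name below is a placeholder, not taken from this repository.
apiVersion: v1
kind: ConfigMap
metadata:
  name: ako-operator-leader-election        # hypothetical lock name
  namespace: tkg-system-networking
  annotations:
    control-plane.alpha.kubernetes.io/leader: >-
      {"holderIdentity":"ako-operator-controller-manager-xxxxx_<uuid>",
       "leaseDurationSeconds":15,"acquireTime":"...","renewTime":"...","leaderTransitions":1}
```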
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - events 31 | verbs: 32 | - create 33 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - addons.cluster.x-k8s.io 9 | resources: 10 | - clusterresourcesets 11 | - clusterresourcesets/status 12 | verbs: 13 | - create 14 | - delete 15 | - get 16 | - list 17 | - patch 18 | - update 19 | - watch 20 | - apiGroups: 21 | - ako.vmware.com 22 | resources: 23 | - aviinfrasettings 24 | verbs: 25 | - create 26 | - delete 27 | - get 28 | - list 29 | - patch 30 | - update 31 | - watch 32 | - apiGroups: 33 | - cluster.x-k8s.io 34 | resources: 35 | - clusters 36 | - clusters/status 37 | verbs: 38 | - create 39 | - delete 40 | - get 41 | - list 42 | - patch 43 | - update 44 | - watch 45 | - apiGroups: 46 | - cluster.x-k8s.io 47 | resources: 48 | - machines 49 | - machines/status 50 | verbs: 51 | - create 52 | - delete 53 | - get 54 | - list 55 | - patch 56 | - update 57 | - watch 58 | - apiGroups: 59 | - "" 60 | resources: 61 | - endpoints 62 | - endpoints/status 63 | - services 64 | - services/status 65 | verbs: 66 | - create 67 | - delete 68 | - get 69 | - list 70 | - update 71 | - watch 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - secrets 76 | verbs: 77 | - create 78 | - delete 79 | - get 80 | - list 81 | - update 82 | - watch 83 | - apiGroups: 84 | - networking.tkg.tanzu.vmware.com 85 | resources: 86 | - akodeploymentconfigs 87 | verbs: 88 | - create 89 | - delete 90 | - get 91 | - list 92 | - patch 93 | - update 94 | - watch 95 | - apiGroups: 96 | - networking.tkg.tanzu.vmware.com 97 | resources: 98 | - akodeploymentconfigs/status 99 | verbs: 100 | - get 101 | - patch 102 | - update 103 | - apiGroups: 104 | - run.tanzu.vmware.com 105 | resources: 106 | - clusterbootstraps 107 | - clusterbootstraps/status 108 | verbs: 109 | - get 110 | - list 111 | - patch 112 | - update 113 | - watch 114 | - apiGroups: 115 | - run.tanzu.vmware.com 116 | resources: 117 | - tanzukubernetesreleases 118 | - tanzukubernetesreleases/status 119 | verbs: 120 | - get 121 | - list 122 | - watch 123 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: 
manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/network_v1alpha1_akodeploymentconfig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: network.tanzu.vmware.com/v1alpha1 2 | kind: AKODeploymentConfig 3 | metadata: 4 | name: sample-akodeploymentconfig 5 | spec: 6 | cloudName: Default-Cloud 7 | serviceEngineGroup: Default-Group 8 | controller: 10.161.150.145 9 | controllerVersion: 20.1.6 10 | adminCredentialRef: 11 | name: avi-controller-credentials 12 | namespace: default 13 | certificateAuthorityRef: 14 | name: avi-controller-ca 15 | namespace: default 16 | dataNetwork: 17 | name: "VM Network" 18 | cidr: 10.161.136.0/24 19 | ipPools: 20 | - start: 10.161.136.31 21 | end: 10.161.136.42 22 | type: V4 23 | controlPlaneNetwork: 24 | name: "VM Network 2" 25 | cidr: 10.192.192.0/19 26 | extraConfigs: 27 | primaryInstance: "true" 28 | apiServerPort: 8080 29 | fullSyncFrequency: "1800" 30 | cniPlugin: antrea 31 | disableStaticRouteSync: "true" 32 | enableEVH: "false" 33 | layer7Only: "false" 34 | vipPerNamespace: "false" 35 | enableEvents: "false" 36 | l4Config: 37 | autoFQDN: "disabled" 38 | defaultDomain: "default" 39 | ingress: 40 | defaultIngressController: false 41 | disableIngressClass: true 42 | serviceType: NodePortLocal 43 | noPGForSNI: false 44 | shardVSSize: SMALL 45 | enableMCI: "false" 46 | nodeNetworkList: 47 | networkName: "VM Network" 48 | cidrs: 49 | - 10.161.20.0/24 50 | - 10.161.136.0/24 51 | log: 52 | logLevel: "INFO" 53 | networksConfig: 54 | enableRHI: false 55 | nsxtT1LR: "NSX-T-T1-ROUTER-ID" 56 | --- 57 | apiVersion: v1 58 | kind: Secret 59 | metadata: 60 | name: avi-controller-credentials 61 | type: Opaque 62 | data: 63 | username: YWRtaW4= 64 | password: YWRtaW4hMjM= 65 | --- 66 | apiVersion: v1 67 | kind: Secret 68 | metadata: 69 | name: avi-controller-ca 70 | type: Opaque 71 | data: 72 | certificateAuthorityData: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUYrRENDQStDZ0F3SUJBZ0lCQVRBTkJna3Foa2lHOXcwQkFRc0ZBREJnTVFzd0NRWURWUVFHRXdKVlV6RVQKTUJFR0ExVUVDQXdLUTJGc2FXWnZjbTVwWVRFVU1CSUdBMVVFQnd3TFUyRnVkR0VnUTJ4aGNtRXhFekFSQmdOVgpCQW9NQ2tOdmJuUnliMnhzWlhJeEVUQVBCZ05WQkFNTUNHTmhMbXh2WTJGc01CNFhEVGN3TURFd01UQXdNREF3Ck1Gb1hEVE13TVRJeE5ESXlOREF4TWxvd1lERUxNQWtHQTFVRUJoTUNWVk14RXpBUkJnTlZCQWdNQ2tOaGJHbG0KYjNKdWFXRXhGREFTQmdOVkJBY01DMU5oYm5SaElFTnNZWEpoTVJNd0VRWURWUVFLREFwRGIyNTBjbTlzYkdWeQpNUkV3RHdZRFZRUUREQWhqWVM1c2IyTmhiRENDQWlJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dJUEFEQ0NBZ29DCmdnSUJBTndRQ2Q1a09hV05kUlQxcmErN2xJMmRjRkN6dnRkSmJsMXBZRmE4eFhsZE11NW9MR093dlFLbkhubmcKcHphNDkyakhlcHdWakxlQThvYnF0MlJaTkh4SUZMZE9nbFZNY3BHSlRiY2RnQWJHaDNDU3VEcHdQMTNOOTVnYQo3b3hlaGZuZEtPRC9MSDk0MVNWaFVZN2FuZXNCempDOVFVcGZ0SWFmaCtkSGI4dklHV0NWanZjK09JYVVJR1NyCnQrZk91alhHVkN1ZDA4NEtLeG9qVXZPU2RPaGdyV2ZVZG9BMUdxU3JmcTY3NVRSWWlMYkF1b0pleXRDSVNCOVYKeFVoYmQvMldGU25YVFZOR1luN3QxUTh1bjE2VExNVTdPQXIrWFgwWTB6alU3V3NsMUdNaExmeDZVWEtPZGUzTgpvVlZFMFdtZlQvZTRDUzFoMkFGMnJoS0N0OVVTL29pM21oOEZFZ1p3dWxrWmNCUlhlV09kc243RWViUzkzTnpDClNXajRhLzhYYUJqZzQ2WEZVUjMrclFreEVoRFM5SURSYTdXQk43OEk1WDJGdzVZL2ZQYVhvYUh4TklxNmF5SmoKUTRDTVgvQnBEcVZoSnhIQTZoa1VUdWhxOWZlc1NTYXVQRkUvSEdhZHA5Q0FCUlV2VHdGczFrMDNRWVBDWXlrOQp4em5aTmY0OXpuaUZySnZ2VDJRc0RUNk54M0pBamVoN3pJNnB0Z25tMWVqN0NiZXhDbi8rL0RicW9rQ05ON09VCktsUzVDcU1kTCswZGE5NnNvQXhEUmM5WE1HV0xsY053UXowc3FKSkNvR1NFU3pVa1haS1JqZ3ZIY05pVDZUV2cKT0Y1OTNaMEVpZTJGcFdFTW80Mlk1MGRhWFFMZDNubDZuTFRjM3J1S2VnNmxlZ2tEQWdNQkFBR2pnYnd3Z2JrdwpIUVlEVlIwT0JCWUVGR2Q3UHZ4T0N4OEVtcWtBc0pTTkd5S09QWW55TUlHSkJnTlZIU01FZ1lFd2Y0QVVaM3MrCi9FNExId1NhcVFDd2xJMGJJbzQ5aWZLaFpLUmlNR0F4Q3pBSkJnTlZCQVlUQWxWVE1STXdFUVlEVlFRSURBcEQKWVd4cFptOXlibWxoTVJRd0VnWURWUVFIREF0VFlXNTBZU0JEYkdGeVlURVRNQkVHQTFVRUNnd0tRMjl1ZEhKdgpiR3hsY2pFUk1BOEdBMVVFQXd3SVkyRXViRzlqWVd5Q0FRRXdEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHCjl3MEJBUXNGQUFPQ0FnRUFIbUt1ZWN5UURVY1hGQmZMcnVYU3hUc0ZlTU4yL0w4Z1FSQnAzRU4yV0xVOWFhb2gKZ2E4aVVjYUhOdWlYVUdiSFkrUmhwa2F0N3VKcm9NM0Z5dUhxWko1VWNFVVhkYWNQQ0NzcHlRdDYxbzJVYXFCMQpCZTlxT1c1YkFicFdYalNLUWdkTVFIa0hNaVBUUnF6WnhCMk9QM0pGMGpsZjFRS3JvK0ttNXdCY2xjdm9mTDI0ClduL1FhUFc4bklKUndiOSs5WStpZnE5Q2JtbU5aVFRDa2pudGszbFNVL1luS1NJMy9vbjdvUEtDclF4M2FLeUYKbnIwWVQwZEpYc3JjRnhKL3ZhakZremJYL1Y1VmhrN2g2R1RTaDJFOEtyVVlkeHFhdWNJaFN4VGdvdFYwaWlpUgpRQVRLNGZsWDBBRzI1YVNDSmREV3ROZUdlVWkvZDI4VmlqUXMwM3NtVklkRlVmbzVoUlpQeGJHc1h0RWJTMXh6CnZQN2hSQU8vVXRTaXFhV0JmQU1JWHgvenpwaXJxYTg4RjRxZGQ4VEtFUWdMYkk2cHJIUjNOUDJvSWpPaHBDZWgKOTA5UlJ6a2t4eTJRanNyNVJyYkJhdlZXeFJaUnZPYUduRFVzQzZJMUpxa0RwNU1CbEc1NUxBcUEwRDNnVmVSQwovK3M5OVRCM2w5ZXFqUnRJYmZHU1N5MmRBSUQvYldVRnN0eTFCcWRaZzJNL1kwd1lQTmV1TXgzeXltZlpaRjFyCnU1WnVJRFRPeHJaMDNsYUhCMzJMM21laVROWjFkWGt3SXlIa09HYS9tbTN5SlZ2Y3UrLzlzaGZTdWxncFdiY3IKb3hlYnFVSHQ0dElQbDh2Z3BrR3gxL3Z2eWNta0lwUXEzbEY5OGdTb1NVV3k4bWxPTVJPcUgzYW1HVW89Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K 73 | -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting vars. 2 | # It requires kustomize v2.1.0 or newer to work properly. 
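The net effect of the substitution rules below, combined with the `ako-operator-` name prefix, the `tkg-system-networking` namespace, and the CA-injection patch in `config/default`, should be webhook configurations that point at the prefixed webhook Service and get their CA bundle injected by cert-manager. The sketch that follows is an inference from those files, not a manifest checked into the repository.

```yaml
# Sketch only: one webhook entry as it should look after kustomize rendering.
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: ako-operator-validating-webhook-configuration
  annotations:
    cert-manager.io/inject-ca-from: tkg-system-networking/ako-operator-serving-cert
webhooks:
  - name: vakodeploymentconfig.kb.io
    admissionReviewVersions: ["v1", "v1alpha1"]
    sideEffects: None
    failurePolicy: Fail
    clientConfig:
      # caBundle is filled in at runtime by cert-manager's CA injector
      service:
        name: ako-operator-webhook-service      # rewritten by the nameReference rule below
        namespace: tkg-system-networking        # set by the namespace rule below
        path: /validate-networking-tkg-tanzu-vmware-com-v1alpha1-akodeploymentconfig
    rules:
      - apiGroups: ["networking.tkg.tanzu.vmware.com"]
        apiVersions: ["v1alpha1"]
        operations: ["CREATE", "UPDATE", "DELETE"]
        resources: ["akodeploymentconfigs"]
```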
3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | 24 | varReference: 25 | - path: metadata/annotations 26 | -------------------------------------------------------------------------------- /config/webhook/manifests.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: admissionregistration.k8s.io/v1 3 | kind: MutatingWebhookConfiguration 4 | metadata: 5 | name: mutating-webhook-configuration 6 | webhooks: 7 | - admissionReviewVersions: 8 | - v1 9 | - v1alpha1 10 | clientConfig: 11 | service: 12 | name: webhook-service 13 | namespace: system 14 | path: /validate-networking-tkg-tanzu-vmware-com-v1alpha1-akodeploymentconfig 15 | failurePolicy: Fail 16 | name: vakodeploymentconfig.kb.io 17 | rules: 18 | - apiGroups: 19 | - networking.tkg.tanzu.vmware.com 20 | apiVersions: 21 | - v1alpha1 22 | operations: 23 | - CREATE 24 | - UPDATE 25 | resources: 26 | - akodeploymentconfigs 27 | sideEffects: None 28 | --- 29 | apiVersion: admissionregistration.k8s.io/v1 30 | kind: ValidatingWebhookConfiguration 31 | metadata: 32 | name: validating-webhook-configuration 33 | webhooks: 34 | - admissionReviewVersions: 35 | - v1 36 | - v1alpha1 37 | clientConfig: 38 | service: 39 | name: webhook-service 40 | namespace: system 41 | path: /validate-networking-tkg-tanzu-vmware-com-v1alpha1-akodeploymentconfig 42 | failurePolicy: Fail 43 | name: vakodeploymentconfig.kb.io 44 | rules: 45 | - apiGroups: 46 | - networking.tkg.tanzu.vmware.com 47 | apiVersions: 48 | - v1alpha1 49 | operations: 50 | - CREATE 51 | - UPDATE 52 | - DELETE 53 | resources: 54 | - akodeploymentconfigs 55 | sideEffects: None 56 | -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 9443 11 | -------------------------------------------------------------------------------- /config/ytt/ako-operator.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | #@ def labels(): 4 | app: tanzu-ako-operator 5 | #@ end 6 | #@ 7 | #@ def dockerImage(): 8 | #@ return ":".join(["/".join([data.values.imageRegistry, data.values.imageName]), data.values.imageTag]) 9 | #@ end 10 | --- 11 | apiVersion: v1 12 | kind: Namespace 13 | metadata: 14 | name: #@ data.values.namespace 15 | --- 16 | apiVersion: apps/v1 17 | kind: Deployment 18 | metadata: 19 | labels: #@ labels() 20 | name: ako-operator-controller-manager 21 | namespace: #@ data.values.namespace 22 | spec: 23 | replicas: 1 24 | selector: 25 | matchLabels: #@ labels() 26 | template: 27 | metadata: 28 | labels: #@ labels() 29 | spec: 30 | containers: 31 | - args: 32 | - 
--metrics-addr=127.0.0.1:8080 33 | command: 34 | - /manager 35 | image: #@ dockerImage() 36 | name: manager 37 | ports: 38 | - containerPort: 9443 39 | name: webhook-server 40 | protocol: TCP 41 | resources: 42 | limits: 43 | cpu: 100m 44 | memory: 300Mi 45 | requests: 46 | cpu: 100m 47 | memory: 100Mi 48 | volumeMounts: 49 | - mountPath: /tmp/k8s-webhook-server/serving-certs 50 | name: cert 51 | readOnly: true 52 | terminationGracePeriodSeconds: 10 53 | volumes: 54 | - name: cert 55 | secret: 56 | defaultMode: 420 57 | secretName: webhook-server-cert 58 | -------------------------------------------------------------------------------- /config/ytt/akodeploymentconfig/akodeploymentconfig.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | #@ load("@ytt:base64", "base64") 3 | #@ 4 | #@ def has_tenant_section(): 5 | #@ return data.values.AVI_TENANT_NAME != "" and data.values.AVI_TENANT_CONTEXT != "" 6 | #@ end 7 | #@ 8 | #@ def has_log_section(): 9 | #@ return data.values.AVI_LOG_PERSISTENT_VOLUME_CLAIM != "" or data.values.AVI_LOG_MOUNT_PATH != "" or data.values.AVI_LOG_FILE != "" 10 | #@ end 11 | #@ 12 | #@ def has_rbac_section(): 13 | #@ return data.values.AVI_RBAC_PSP_POLICY_API_VERSION != "" and data.values.AVI_RBAC_PSP_ENABLED 14 | #@ end 15 | #@ 16 | #@ def has_ippools_section(): 17 | #@ return data.values.AVI_DATA_NETWORK_IP_POOL_START != "" and data.values.AVI_DATA_NETWORK_IP_POOL_END != "" 18 | #@ end 19 | --- 20 | apiVersion: network.tanzu.vmware.com/v1alpha1 21 | kind: AKODeploymentConfig 22 | metadata: 23 | name: install-ako-for-all 24 | spec: 25 | cloudName: #@ data.values.AVI_CLOUD_NAME 26 | controller: #@ data.values.AVI_CONTROLLER 27 | serviceEngineGroup: #@ data.values.AVI_SERVICE_ENGINE 28 | #@ if data.values.AVI_LABELS != "": 29 | clusterSelector: 30 | matchLabels: #@ data.values.AVI_LABELS 31 | #@ end 32 | #@ if data.values.AVI_WORKLOAD_CREDENTIAL_NAME != "": 33 | workloadCredentialRef: 34 | name: #@ data.values.AVI_WORKLOAD_CREDENTIAL_NAME 35 | namespace: #@ data.values.AVI_NAMESPACE 36 | #@ end 37 | adminCredentialRef: 38 | name: avi-controller-credentials 39 | namespace: #@ data.values.AVI_NAMESPACE 40 | certificateAuthorityRef: 41 | name: avi-controller-ca 42 | namespace: #@ data.values.AVI_NAMESPACE 43 | dataNetwork: 44 | name: #@ data.values.AVI_DATA_NETWORK 45 | cidr: #@ data.values.AVI_DATA_NETWORK_CIDR 46 | #@ if data.values.AVI_CONTROL_PLANE_NETWORK != "": 47 | controlPlaneNetwork: 48 | name: #@ data.values.AVI_CONTROL_PLANE_NETWORK 49 | cidr: #@ data.values.AVI_CONTROL_PLANE_NETWORK_CIDR 50 | #@ else: 51 | controlPlaneNetwork: 52 | name: #@ data.values.AVI_DATA_NETWORK 53 | cidr: #@ data.values.AVI_DATA_NETWORK_CIDR 54 | #@ end 55 | #@ if has_ippools_section(): 56 | ipPools: 57 | - start: #@ data.values.AVI_DATA_NETWORK_IP_POOL_START 58 | end: #@ data.values.AVI_DATA_NETWORK_IP_POOL_END 59 | type: V4 60 | #@ end 61 | #@ if has_tenant_section(): 62 | tenant: 63 | name: #@ data.values.AVI_TENANT_NAME 64 | context: #@ data.values.AVI_TENANT_CONTEXT 65 | #@ end 66 | extraConfigs: 67 | #@ if has_log_section(): 68 | log: 69 | #@ if data.values.AVI_LOG_PERSISTENT_VOLUME_CLAIM != "": 70 | persistentVolumeClaim: #@ data.values.AVI_LOG_PERSISTENT_VOLUME_CLAIM 71 | #@ end 72 | #@ if data.values.AVI_LOG_MOUNT_PATH != "": 73 | mountPath: #@ data.values.AVI_LOG_MOUNT_PATH 74 | #@ end 75 | #@ if data.values.AVI_LOG_FILE != "": 76 | logFile: #@ data.values.AVI_LOG_FILE 77 | #@ end 78 | #@ end 79 | #@ if 
has_rbac_section(): 80 | rbac: 81 | pspPolicyAPIVersion: #@ data.values.AVI_RBAC_PSP_POLICY_API_VERSION 82 | pspEnabled: #@ data.values.AVI_RBAC_PSP_ENABLED 83 | #@ end 84 | ingress: 85 | disableIngressClass: #@ data.values.AVI_INGRESS_DISABLE_INGRESS_CLASS 86 | defaultIngressController: #@ data.values.AVI_INGRESS_DEFAULT_INGRESS_CONTROLLER 87 | #@ if data.values.AVI_INGRESS_SHARD_VS_SIZE != "": 88 | shardVSSize: #@ data.values.AVI_INGRESS_SHARD_VS_SIZE 89 | #@ end 90 | #@ if data.values.AVI_INGRESS_SERVICE_TYPE != "": 91 | serviceType: #@ data.values.AVI_INGRESS_SERVICE_TYPE 92 | #@ end 93 | --- 94 | apiVersion: v1 95 | kind: Secret 96 | metadata: 97 | name: avi-controller-credentials 98 | namespace: #@ data.values.AVI_NAMESPACE 99 | type: Opaque 100 | data: 101 | username: #@ base64.encode(data.values.AVI_USERNAME) 102 | password: #@ base64.encode(data.values.AVI_PASSWORD) 103 | --- 104 | apiVersion: v1 105 | kind: Secret 106 | metadata: 107 | name: avi-controller-ca 108 | namespace: #@ data.values.AVI_NAMESPACE 109 | type: Opaque 110 | data: 111 | certificateAuthorityData: #@ base64.encode(data.values.AVI_CA_DATA) 112 | -------------------------------------------------------------------------------- /config/ytt/akodeploymentconfig/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | AVI_CLOUD_NAME: "Default-Cloud" 4 | AVI_SERVICE_ENGINE: "Default-Group" 5 | AVI_CONTROLLER: 10.161.150.145 6 | AVI_NAMESPACE: "tanzu-system-networking" 7 | AVI_AKO_IMAGE: harbor-pks.vmware.com/tkgextensions/tkg-networking/ako 8 | AVI_AKO_IMAGE_TAG: master-dad9e53 9 | AVI_AKO_IMAGE_PULL_POLICY: IfNotPresent 10 | AVI_INGRESS_DISABLE_INGRESS_CLASS: true 11 | AVI_INGRESS_DEFAULT_INGRESS_CONTROLLER: false 12 | AVI_INGRESS_SHARD_VS_SIZE: "" 13 | AVI_INGRESS_SERVICE_TYPE: "" 14 | AVI_RBAC_PSP_ENABLED: false 15 | AVI_RBAC_PSP_POLICY_API_VERSION: "" 16 | AVI_TENANT_NAME: "" 17 | AVI_TENANT_CONTEXT: "" 18 | AVI_LOG_PERSISTENT_VOLUME_CLAIM: "" 19 | AVI_LOG_MOUNT_PATH: "" 20 | AVI_LOG_FILE: "" 21 | AVI_WORKLOAD_CREDENTIAL_NAME: "" 22 | AVI_LABELS: "" 23 | AVI_DATA_NETWORK: "VM Network" 24 | AVI_DATA_NETWORK_CIDR: 10.161.136.0/24 25 | AVI_DATA_NETWORK_IP_POOL_START: "" 26 | AVI_DATA_NETWORK_IP_POOL_END: "" 27 | AVI_CONTROL_PLANE_NETWORK: "VM Network 2" 28 | AVI_CONTROL_PLANE_NETWORK_CIDR: 10.192.192.0/24 29 | AVI_USERNAME: "admin" 30 | AVI_PASSWORD: "admin!23" 31 | AVI_CA_DATA: | 32 | -----BEGIN CERTIFICATE----- 33 | MIIF+DCCA+CgAwIBAgIBATANBgkqhkiG9w0BAQsFADBgMQswCQYDVQQGEwJVUzET 34 | MBEGA1UECAwKQ2FsaWZvcm5pYTEUMBIGA1UEBwwLU2FudGEgQ2xhcmExEzARBgNV 35 | BAoMCkNvbnRyb2xsZXIxETAPBgNVBAMMCGNhLmxvY2FsMB4XDTcwMDEwMTAwMDAw 36 | MFoXDTMwMTIxNDIyNDAxMlowYDELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlm 37 | b3JuaWExFDASBgNVBAcMC1NhbnRhIENsYXJhMRMwEQYDVQQKDApDb250cm9sbGVy 38 | MREwDwYDVQQDDAhjYS5sb2NhbDCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC 39 | ggIBANwQCd5kOaWNdRT1ra+7lI2dcFCzvtdJbl1pYFa8xXldMu5oLGOwvQKnHnng 40 | pza492jHepwVjLeA8obqt2RZNHxIFLdOglVMcpGJTbcdgAbGh3CSuDpwP13N95ga 41 | 7oxehfndKOD/LH941SVhUY7anesBzjC9QUpftIafh+dHb8vIGWCVjvc+OIaUIGSr 42 | t+fOujXGVCud084KKxojUvOSdOhgrWfUdoA1GqSrfq675TRYiLbAuoJeytCISB9V 43 | xUhbd/2WFSnXTVNGYn7t1Q8un16TLMU7OAr+XX0Y0zjU7Wsl1GMhLfx6UXKOde3N 44 | oVVE0WmfT/e4CS1h2AF2rhKCt9US/oi3mh8FEgZwulkZcBRXeWOdsn7EebS93NzC 45 | SWj4a/8XaBjg46XFUR3+rQkxEhDS9IDRa7WBN78I5X2Fw5Y/fPaXoaHxNIq6ayJj 46 | Q4CMX/BpDqVhJxHA6hkUTuhq9fesSSauPFE/HGadp9CABRUvTwFs1k03QYPCYyk9 47 | xznZNf49zniFrJvvT2QsDT6Nx3JAjeh7zI6ptgnm1ej7CbexCn/+/DbqokCNN7OU 48 | 
KlS5CqMdL+0da96soAxDRc9XMGWLlcNwQz0sqJJCoGSESzUkXZKRjgvHcNiT6TWg 49 | OF593Z0Eie2FpWEMo42Y50daXQLd3nl6nLTc3ruKeg6legkDAgMBAAGjgbwwgbkw 50 | HQYDVR0OBBYEFGd7PvxOCx8EmqkAsJSNGyKOPYnyMIGJBgNVHSMEgYEwf4AUZ3s+ 51 | /E4LHwSaqQCwlI0bIo49ifKhZKRiMGAxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApD 52 | YWxpZm9ybmlhMRQwEgYDVQQHDAtTYW50YSBDbGFyYTETMBEGA1UECgwKQ29udHJv 53 | bGxlcjERMA8GA1UEAwwIY2EubG9jYWyCAQEwDAYDVR0TBAUwAwEB/zANBgkqhkiG 54 | 9w0BAQsFAAOCAgEAHmKuecyQDUcXFBfLruXSxTsFeMN2/L8gQRBp3EN2WLU9aaoh 55 | ga8iUcaHNuiXUGbHY+Rhpkat7uJroM3FyuHqZJ5UcEUXdacPCCspyQt61o2UaqB1 56 | Be9qOW5bAbpWXjSKQgdMQHkHMiPTRqzZxB2OP3JF0jlf1QKro+Km5wBclcvofL24 57 | Wn/QaPW8nIJRwb9+9Y+ifq9CbmmNZTTCkjntk3lSU/YnKSI3/on7oPKCrQx3aKyF 58 | nr0YT0dJXsrcFxJ/vajFkzbX/V5Vhk7h6GTSh2E8KrUYdxqaucIhSxTgotV0iiiR 59 | QATK4flX0AG25aSCJdDWtNeGeUi/d28VijQs03smVIdFUfo5hRZPxbGsXtEbS1xz 60 | vP7hRAO/UtSiqaWBfAMIXx/zzpirqa88F4qdd8TKEQgLbI6prHR3NP2oIjOhpCeh 61 | 909RRzkkxy2Qjsr5RrbBavVWxRZRvOaGnDUsC6I1JqkDp5MBlG55LAqA0D3gVeRC 62 | /+s99TB3l9eqjRtIbfGSSy2dAID/bWUFsty1BqdZg2M/Y0wYPNeuMx3yymfZZF1r 63 | u5ZuIDTOxrZ03laHB32L3meiTNZ1dXkwIyHkOGa/mm3yJVvcu+/9shfSulgpWbcr 64 | oxebqUHt4tIPl8vgpkGx1/vvycmkIpQq3lF98gSoSUWy8mlOMROqH3amGUo= 65 | -----END CERTIFICATE----- 66 | -------------------------------------------------------------------------------- /config/ytt/values.yaml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | namespace: "tkg-system-networking" 4 | imageRegistry: harbor-pks.vmware.com/tkgextensions 5 | imageName: tkg-networking/tanzu-ako-operator 6 | imageTag: dc1aec0 7 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/akodeploymentconfig_controller_cluster_phase.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package akodeploymentconfig 5 | 6 | import ( 7 | "context" 8 | 9 | "github.com/go-logr/logr" 10 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/controllers/akodeploymentconfig/cluster" 11 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/controllers/akodeploymentconfig/phases" 12 | 13 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 14 | ctrl "sigs.k8s.io/controller-runtime" 15 | ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 16 | 17 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 18 | ) 19 | 20 | func (r *AKODeploymentConfigReconciler) initCluster(log logr.Logger) { 21 | // Lazily initialize clusterReconciler 22 | if r.ClusterReconciler == nil { 23 | r.ClusterReconciler = cluster.NewReconciler(r.Client, r.Log, r.Scheme) 24 | log.Info("Cluster reconciler initialized") 25 | } 26 | } 27 | 28 | // reconcileClusters reconciles every cluster that matches the 29 | // AKODeploymentConfig's selector 30 | // It's a reconcilePhase function 31 | func (r *AKODeploymentConfigReconciler) reconcileClusters( 32 | ctx context.Context, 33 | log logr.Logger, 34 | obj *akoov1alpha1.AKODeploymentConfig, 35 | ) (ctrl.Result, error) { 36 | r.initCluster(log) 37 | 38 | return phases.ReconcileClustersPhases(ctx, r.Client, log, obj, 39 | []phases.ReconcileClusterPhase{ 40 | r.addClusterFinalizer, 41 | r.ClusterReconciler.ReconcileAddonSecret, 42 | }, 43 | []phases.ReconcileClusterPhase{ 44 | r.ClusterReconciler.ReconcileAddonSecretDelete, 45 | r.ClusterReconciler.ReconcileDelete, 46 | }, 47 | ) 48 | } 49 | 50 | // reconcileClustersDelete reconciles every cluster that matches the 51 | // AKODeploymentConfig's selector when a AKODeploymentConfig is being deleted 52 | // It's a reconcilePhase function 53 | func (r *AKODeploymentConfigReconciler) reconcileClustersDelete( 54 | ctx context.Context, 55 | log logr.Logger, 56 | obj *akoov1alpha1.AKODeploymentConfig, 57 | ) (ctrl.Result, error) { 58 | r.initCluster(log) 59 | 60 | return phases.ReconcileClustersPhases(ctx, r.Client, log, obj, 61 | // When AKODeploymentConfig is being deleted and the target 62 | // cluster is in normal state, remove the label and finalizer to 63 | // stop managing it 64 | []phases.ReconcileClusterPhase{ 65 | r.removeClusterFinalizer, 66 | r.ClusterReconciler.ReconcileAddonSecretDelete, 67 | }, 68 | []phases.ReconcileClusterPhase{ 69 | r.ClusterReconciler.ReconcileAddonSecretDelete, 70 | r.ClusterReconciler.ReconcileDelete, 71 | }, 72 | ) 73 | } 74 | 75 | // addClusterFinalizer is a reconcileClusterPhase. It adds the AVI 76 | // finalizer to a Cluster. 77 | func (r *AKODeploymentConfigReconciler) addClusterFinalizer( 78 | _ context.Context, 79 | log logr.Logger, 80 | cluster *clusterv1.Cluster, 81 | _ *akoov1alpha1.AKODeploymentConfig, 82 | ) (ctrl.Result, error) { 83 | if !ctrlutil.ContainsFinalizer(cluster, akoov1alpha1.ClusterFinalizer) && 84 | cluster.Namespace != akoov1alpha1.TKGSystemNamespace { 85 | log.Info("Add finalizer to cluster", "finalizer", akoov1alpha1.ClusterFinalizer) 86 | ctrlutil.AddFinalizer(cluster, akoov1alpha1.ClusterFinalizer) 87 | } 88 | return ctrl.Result{}, nil 89 | } 90 | 91 | // removeClusterFinalizer is a reconcileClusterPhase. It removes the AVI 92 | // finalizer from a Cluster. This can only be called when the cluster is not in 93 | // deletion state and AKODeploymentConfig is being deleted. 
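// Once the AVI finalizer is removed, the AKO Operator no longer blocks deletion of this Cluster.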
94 | func (r *AKODeploymentConfigReconciler) removeClusterFinalizer( 95 | _ context.Context, 96 | log logr.Logger, 97 | cluster *clusterv1.Cluster, 98 | _ *akoov1alpha1.AKODeploymentConfig, 99 | ) (ctrl.Result, error) { 100 | if ctrlutil.ContainsFinalizer(cluster, akoov1alpha1.ClusterFinalizer) { 101 | log.Info("Removing finalizer from cluster", "finalizer", akoov1alpha1.ClusterFinalizer) 102 | } 103 | ctrlutil.RemoveFinalizer(cluster, akoov1alpha1.ClusterFinalizer) 104 | return ctrl.Result{}, nil 105 | } 106 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/cluster/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package cluster_test 5 | 6 | import ( 7 | "testing" 8 | 9 | . "github.com/onsi/ginkgo" 10 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 11 | "k8s.io/apimachinery/pkg/runtime" 12 | 13 | ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" 14 | ) 15 | 16 | // suite is used for unit and integration testing this controller. 17 | var suite = builder.NewTestSuiteForController( 18 | func(mgr ctrlmgr.Manager) error { 19 | return nil 20 | }, 21 | func(scheme *runtime.Scheme) (err error) { 22 | return nil 23 | }, 24 | ) 25 | 26 | func TestController(t *testing.T) { 27 | suite.Register(t, "AKO Operator AKODeploymentConfig controller Cluster reconciler", intgTests, unitTests) 28 | } 29 | 30 | var _ = BeforeSuite(suite.BeforeSuite) 31 | 32 | var _ = AfterSuite(suite.AfterSuite) 33 | 34 | func intgTests() { 35 | } 36 | 37 | func unitTests() { 38 | Describe("AKO Deployment Spec generation", unitTestAKODeploymentYaml) 39 | Describe("Cluster ip family Validation", unitTestValidateClusterIpFamily) 40 | } 41 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/phases/phases.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package phases 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | 10 | "github.com/go-logr/logr" 11 | 12 | "github.com/pkg/errors" 13 | kerrors "k8s.io/apimachinery/pkg/util/errors" 14 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 15 | "sigs.k8s.io/cluster-api/util" 16 | "sigs.k8s.io/cluster-api/util/patch" 17 | ctrl "sigs.k8s.io/controller-runtime" 18 | "sigs.k8s.io/controller-runtime/pkg/client" 19 | 20 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 21 | ako_operator "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/ako-operator" 22 | ) 23 | 24 | // ReconcilePhase defines a function that reconciles one aspect of 25 | // AKODeploymentConfig 26 | type ReconcilePhase func(context.Context, logr.Logger, *akoov1alpha1.AKODeploymentConfig) (ctrl.Result, error) 27 | 28 | // reconcilePhases runs each phase regardless of its error status. 29 | // The aggregated error will be returned 30 | func ReconcilePhases( 31 | ctx context.Context, 32 | log logr.Logger, 33 | obj *akoov1alpha1.AKODeploymentConfig, 34 | phases []ReconcilePhase, 35 | ) (ctrl.Result, error) { 36 | res := ctrl.Result{} 37 | 38 | var errs []error 39 | for _, phase := range phases { 40 | // Call the inner reconciliation methods. 
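// Every phase is attempted even if an earlier phase returned an error; errors are aggregated below, and requeue results are only merged while no error has been recorded so far.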
41 | phaseResult, err := phase(ctx, log, obj) 42 | if err != nil { 43 | errs = append(errs, err) 44 | } 45 | if len(errs) > 0 { 46 | continue 47 | } 48 | res = util.LowestNonZeroResult(res, phaseResult) 49 | } 50 | return res, kerrors.NewAggregate(errs) 51 | } 52 | 53 | // ReconcileClusterPhase defines a per-cluster function that reconciles one aspect of 54 | // AKODeploymentConfig 55 | type ReconcileClusterPhase func(context.Context, logr.Logger, *clusterv1.Cluster, *akoov1alpha1.AKODeploymentConfig) (ctrl.Result, error) 56 | 57 | // ReconcileClustersPhases reconciles every cluster that matches the 58 | // AKODeploymentConfig's selector by running through an array of phases 59 | func ReconcileClustersPhases( 60 | ctx context.Context, 61 | client client.Client, 62 | log logr.Logger, 63 | obj *akoov1alpha1.AKODeploymentConfig, 64 | normalPhases []ReconcileClusterPhase, 65 | deletePhases []ReconcileClusterPhase, 66 | ) (ctrl.Result, error) { 67 | res := ctrl.Result{} 68 | 69 | // Get the list of clusters managed by the AKODeploymentConfig 70 | clusters, err := ako_operator.ListAkoDeploymentConfigSelectClusters(ctx, client, log, obj) 71 | if err != nil { 72 | log.Error(err, "Failed to list clusters selected by the current AKODeploymentConfig") 73 | return res, err 74 | } 75 | 76 | if len(clusters.Items) == 0 { 77 | log.Info("No cluster matches the selector, skip") 78 | return res, nil 79 | } 80 | 81 | var allErrs []error 82 | // For each cluster managed by the AKODeploymentConfig, run each phase 83 | // function 84 | for _, cluster := range clusters.Items { 85 | var errs []error 86 | 87 | clog := log.WithValues("cluster", cluster.Namespace+"/"+cluster.Name) 88 | 89 | // skip reconcile if cluster is using kube-vip to provide load balancer service 90 | if isLBProvider, err := ako_operator.IsLoadBalancerProvider(&cluster); err != nil { 91 | log.Error(err, "can't unmarshal cluster variables") 92 | allErrs = append(allErrs, err) 93 | continue 94 | } else if !isLBProvider { 95 | log.Info(fmt.Sprintf("cluster uses kube-vip to provide load balancer type of service, skip reconciling for cluster %s/%s", cluster.Namespace, cluster.Name)) 96 | continue 97 | } 98 | 99 | // Always Patch for each cluster when exiting this function so changes to the resource are updated on the API server.
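// patch.NewHelper captures the cluster's current state here so that the Patch call at the end of this iteration only submits the changes made by ApplyClusterLabel and the phase functions.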
100 | patchHelper, err := patch.NewHelper(&cluster, client) 101 | if err != nil { 102 | return res, errors.Wrapf(err, "failed to init patch helper for %s %s", 103 | cluster.GroupVersionKind(), cluster.Namespace+"/"+cluster.Name) 104 | } 105 | 106 | // update cluster avi label before run any phase functions 107 | ako_operator.ApplyClusterLabel(log, &cluster, obj) 108 | 109 | phases := normalPhases 110 | if !cluster.GetDeletionTimestamp().IsZero() { 111 | phases = deletePhases 112 | } 113 | for _, phase := range phases { 114 | // Call the inner reconciliation methods regardless of 115 | // the error status 116 | phaseResult, err := phase(ctx, clog, &cluster, obj) 117 | if err != nil { 118 | errs = append(errs, err) 119 | } 120 | if len(errs) > 0 { 121 | continue 122 | } 123 | res = util.LowestNonZeroResult(res, phaseResult) 124 | } 125 | 126 | clusterErr := kerrors.NewAggregate(errs) 127 | patchOpts := []patch.Option{} 128 | if clusterErr == nil { 129 | patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) 130 | } else { 131 | allErrs = append(allErrs, clusterErr) 132 | } 133 | 134 | if err := patchHelper.Patch(ctx, &cluster, patchOpts...); err != nil { 135 | clusterErr = kerrors.NewAggregate([]error{clusterErr, err}) 136 | if clusterErr != nil { 137 | log.Error(clusterErr, "patch failed") 138 | } 139 | } 140 | } 141 | 142 | return res, kerrors.NewAggregate(allErrs) 143 | } 144 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/phases/phases_unit_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package phases 5 | 6 | import ( 7 | "github.com/go-logr/logr" 8 | . "github.com/onsi/ginkgo" 9 | . 
"github.com/onsi/gomega" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | 14 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 15 | ako_operator "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/ako-operator" 16 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 17 | ) 18 | 19 | func ReconcilePhaseUnitTest() { 20 | var ( 21 | err error 22 | log logr.Logger 23 | ctx *builder.IntegrationTestContext 24 | akoDeploymentConfig *akoov1alpha1.AKODeploymentConfig 25 | ) 26 | BeforeEach(func() { 27 | akoDeploymentConfig = &akoov1alpha1.AKODeploymentConfig{ 28 | ObjectMeta: metav1.ObjectMeta{ 29 | Name: "test-ako-deployment-config", 30 | Namespace: "default", 31 | }, 32 | Spec: akoov1alpha1.AKODeploymentConfigSpec{ 33 | ClusterSelector: metav1.LabelSelector{ 34 | MatchLabels: map[string]string{ 35 | "test": "test", 36 | }, 37 | }, 38 | DataNetwork: akoov1alpha1.DataNetwork{ 39 | Name: "test", 40 | CIDR: "1.1.1.1/20", 41 | IPPools: []akoov1alpha1.IPPool{}, 42 | }, 43 | ControlPlaneNetwork: akoov1alpha1.ControlPlaneNetwork{ 44 | Name: "integration-test-8ed12g", 45 | CIDR: "10.1.0.0/24", 46 | }, 47 | CertificateAuthorityRef: &akoov1alpha1.SecretRef{ 48 | Name: "test-ca-secret", 49 | Namespace: "default", 50 | }, 51 | AdminCredentialRef: &akoov1alpha1.SecretRef{}, 52 | }, 53 | } 54 | ctx = suite.NewIntegrationTestContext() 55 | log = ctrl.Log.WithName("controllers").WithName("AKODeploymentConfig") 56 | }) 57 | 58 | Context("Should be able to list all workload clusters", func() { 59 | var cluster *clusterv1.Cluster 60 | 61 | BeforeEach(func() { 62 | //ctx = suite.NewIntegrationTestContext() 63 | cluster = &clusterv1.Cluster{ 64 | ObjectMeta: metav1.ObjectMeta{ 65 | Name: "test-cluster", 66 | Namespace: "default", 67 | Labels: map[string]string{ 68 | akoov1alpha1.AviClusterLabel: "test-ako-deployment-config", 69 | "test": "test", 70 | }, 71 | }, 72 | Spec: clusterv1.ClusterSpec{}, 73 | } 74 | err = ctx.Client.Create(ctx.Context, cluster) 75 | Expect(err).ShouldNot(HaveOccurred()) 76 | }) 77 | 78 | AfterEach(func() { 79 | err = ctx.Client.Delete(ctx.Context, cluster) 80 | Expect(err).ShouldNot(HaveOccurred()) 81 | }) 82 | 83 | It("list all selected workload clusters", func() { 84 | clusterList, err := ako_operator.ListAkoDeploymentConfigSelectClusters(ctx.Context, ctx.Client, log, akoDeploymentConfig) 85 | Expect(err).ShouldNot(HaveOccurred()) 86 | Expect(len(clusterList.Items)).To(Equal(1)) 87 | Expect(clusterList.Items[0].Name).To(Equal("test-cluster")) 88 | }) 89 | }) 90 | } 91 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/phases/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package phases 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | . 
"github.com/onsi/ginkgo" 11 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/aviclient" 12 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 13 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 14 | corev1 "k8s.io/api/core/v1" 15 | "k8s.io/apimachinery/pkg/runtime" 16 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 17 | 18 | ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" 19 | ) 20 | 21 | // suite is used for unit and integration testing this controller. 22 | var suite = builder.NewTestSuiteForController( 23 | func(mgr ctrlmgr.Manager) error { 24 | builder.FakeAvi = aviclient.NewFakeAviClient() 25 | return nil 26 | }, 27 | func(scheme *runtime.Scheme) (err error) { 28 | err = corev1.AddToScheme(scheme) 29 | if err != nil { 30 | return err 31 | } 32 | err = clusterv1.AddToScheme(scheme) 33 | if err != nil { 34 | return err 35 | } 36 | return nil 37 | }, 38 | filepath.Join(testutil.FindModuleDir("sigs.k8s.io/cluster-api"), "config", "crd", "bases"), 39 | ) 40 | 41 | func TestController(t *testing.T) { 42 | suite.Register(t, "AKO Operator AKODeploymentConfig controller phases reconciler", intgTests, unitTests) 43 | } 44 | 45 | var _ = BeforeSuite(suite.BeforeSuite) 46 | 47 | var _ = AfterSuite(suite.AfterSuite) 48 | 49 | func intgTests() { 50 | Describe("Phase Reconciler Test", ReconcilePhaseUnitTest) 51 | } 52 | 53 | func unitTests() { 54 | } 55 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package akodeploymentconfig_test 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | . "github.com/onsi/ginkgo" 11 | 12 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 13 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/funcs" 14 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 15 | ) 16 | 17 | // suite is used for unit and integration testing this controller. 18 | var suite = builder.NewTestSuiteForController( 19 | funcs.AddAKODeploymentConfigAndClusterControllerToMgrFunc, 20 | funcs.AddAllToSchemeFunc, 21 | filepath.Join(testutil.FindModuleDir("sigs.k8s.io/cluster-api"), "config", "crd", "bases"), 22 | filepath.Join(testutil.FindModuleDir("github.com/vmware/load-balancer-and-ingress-services-for-kubernetes"), "helm", "ako", "crds"), 23 | ) 24 | 25 | func TestController(t *testing.T) { 26 | suite.Register(t, "AKO Operator", intgTests, unitTests) 27 | } 28 | 29 | var _ = BeforeSuite(suite.BeforeSuite) 30 | 31 | var _ = AfterSuite(suite.AfterSuite) 32 | 33 | func intgTests() { 34 | Describe("AkoDeploymentConfigController Test", intgTestAkoDeploymentConfigController) 35 | } 36 | 37 | func unitTests() { 38 | Describe("Ensure static ranges Test", unitTestEnsureStaticRanges) 39 | } 40 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/user/ako_role_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2024 VMware, Inc. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package user 5 | 6 | import ( 7 | "testing" 8 | ) 9 | 10 | func TestRolePermissionAndMapMatch(t *testing.T) { 11 | if len(AkoRolePermission) != len(AkoRolePermissionMap) { 12 | t.Errorf("len(AkoRolePermission) == %d, len(AkoRolePermissionMap) == %d", len(AkoRolePermission), len(AkoRolePermissionMap)) 13 | } 14 | 15 | allMatch := true 16 | for _, permission := range AkoRolePermission { 17 | if *permission.Type != AkoRolePermissionMap[*permission.Resource] { 18 | allMatch = false 19 | t.Logf("AkoRolePermission[%s] == %s, AkoRolePermissionMap[%s] == %s", *permission.Resource, *permission.Type, *permission.Resource, AkoRolePermissionMap[*permission.Resource]) 20 | } 21 | } 22 | 23 | if !allMatch { 24 | t.Error("Not all entries in AkoRolePermission and AkoRolePermissionMap match") 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /controllers/akodeploymentconfig/user/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package user 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | networkv1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 11 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 12 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 13 | corev1 "k8s.io/api/core/v1" 14 | "k8s.io/apimachinery/pkg/runtime" 15 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 16 | ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" 17 | 18 | . "github.com/onsi/ginkgo" 19 | ) 20 | 21 | // suite is used for unit and integration testing this controller. 22 | var suite = builder.NewTestSuiteForReconciler( 23 | func(mgr ctrlmgr.Manager) error { 24 | return nil 25 | }, 26 | func(scheme *runtime.Scheme) (err error) { 27 | err = networkv1alpha1.AddToScheme(scheme) 28 | if err != nil { 29 | return err 30 | } 31 | err = corev1.AddToScheme(scheme) 32 | if err != nil { 33 | return err 34 | } 35 | err = clusterv1.AddToScheme(scheme) 36 | if err != nil { 37 | return err 38 | } 39 | return nil 40 | }, 41 | filepath.Join(testutil.FindModuleDir("sigs.k8s.io/cluster-api"), "config", "crd", "bases"), 42 | ) 43 | 44 | func TestController(t *testing.T) { 45 | suite.Register(t, "AKO Reconciler", intgTests, unitTests) 46 | } 47 | 48 | var _ = BeforeSuite(suite.BeforeSuite) 49 | 50 | var _ = AfterSuite(suite.AfterSuite) 51 | 52 | func intgTests() { 53 | Describe("ako user reconciler test", AkoUserReconcilerTest) 54 | } 55 | 56 | func unitTests() { 57 | Describe("AKO user reconciler unit tests", SyncAkoUserRoleTest) 58 | } 59 | -------------------------------------------------------------------------------- /controllers/cluster/cluster_intg_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package cluster_test 5 | 6 | import ( 7 | "os" 8 | 9 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/haprovider" 10 | 11 | . "github.com/onsi/ginkgo" 12 | . 
"github.com/onsi/gomega" 13 | 14 | ako_operator "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/ako-operator" 15 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 16 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 17 | "sigs.k8s.io/controller-runtime/pkg/client" 18 | 19 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 23 | ) 24 | 25 | func intgTestEnsureClusterHAProvider() { 26 | Context("EnsureHAService", func() { 27 | var ( 28 | ctx *builder.IntegrationTestContext 29 | cluster *clusterv1.Cluster 30 | staticCluster *clusterv1.Cluster 31 | serviceName string 32 | ) 33 | 34 | staticCluster = &clusterv1.Cluster{ 35 | ObjectMeta: metav1.ObjectMeta{ 36 | Name: "ha-cluster", 37 | }, 38 | Spec: clusterv1.ClusterSpec{}, 39 | } 40 | 41 | BeforeEach(func() { 42 | ctx = suite.NewIntegrationTestContext() 43 | cluster = staticCluster.DeepCopy() 44 | cluster.Namespace = ctx.Namespace 45 | serviceName = cluster.Namespace + "-" + cluster.Name + "-" + akoov1alpha1.HAServiceName 46 | }) 47 | AfterEach(func() { 48 | ctx.AfterEach() 49 | ctx = nil 50 | }) 51 | 52 | When("Avi is not HA provider", func() { 53 | BeforeEach(func() { 54 | err := os.Setenv(ako_operator.IsControlPlaneHAProvider, "False") 55 | Expect(err).ShouldNot(HaveOccurred()) 56 | testutil.CreateObjects(ctx, cluster) 57 | }) 58 | AfterEach(func() { 59 | testutil.DeleteObjects(ctx, cluster) 60 | }) 61 | It("should not create service or endpoint", func() { 62 | testutil.EnsureRuntimeObjectMatchExpectation(ctx, client.ObjectKey{ 63 | Name: serviceName, 64 | Namespace: ctx.Namespace, 65 | }, &corev1.Service{}, testutil.NOTFOUND) 66 | testutil.EnsureRuntimeObjectMatchExpectation(ctx, client.ObjectKey{ 67 | Name: serviceName, 68 | Namespace: ctx.Namespace, 69 | }, &corev1.Endpoints{}, testutil.NOTFOUND) 70 | }) 71 | }) 72 | 73 | When("Avi is HA provider", func() { 74 | When("HA service and endpoint not exist", func() { 75 | BeforeEach(func() { 76 | err := os.Setenv(ako_operator.IsControlPlaneHAProvider, "True") 77 | Expect(err).ShouldNot(HaveOccurred()) 78 | testutil.CreateObjects(ctx, cluster) 79 | 80 | // add an ip to service since ako is absent 81 | service := &corev1.Service{} 82 | testutil.EnsureRuntimeObjectMatchExpectation(ctx, client.ObjectKey{ 83 | Name: serviceName, 84 | Namespace: ctx.Namespace, 85 | }, &corev1.Service{}, testutil.EXIST) 86 | 87 | err = ctx.Client.Get(ctx, client.ObjectKey{Name: serviceName, Namespace: ctx.Namespace}, service) 88 | Expect(err).ShouldNot(HaveOccurred()) 89 | 90 | service.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{{ 91 | IP: "10.0.0.1", 92 | Hostname: "intg-test", 93 | }} 94 | err = ctx.Client.Status().Update(ctx, service) 95 | Expect(err).To(BeNil()) 96 | }) 97 | AfterEach(func() { 98 | testutil.DeleteObjects(ctx, cluster) 99 | }) 100 | 101 | It("should create service and endpoint", func() { 102 | testutil.EnsureRuntimeObjectMatchExpectation(ctx, client.ObjectKey{ 103 | Name: serviceName, 104 | Namespace: ctx.Namespace, 105 | }, &corev1.Endpoints{}, testutil.EXIST) 106 | }) 107 | }) 108 | 109 | When("FQDN HA service endpoint exist", func() { 110 | BeforeEach(func() { 111 | err := os.Setenv(ako_operator.IsControlPlaneHAProvider, "True") 112 | Expect(err).ShouldNot(HaveOccurred()) 113 | cluster.Annotations = 
make(map[string]string) 114 | cluster.Annotations["tkg.tanzu.vmware.com/cluster-controlplane-endpoint"] = "test.local.org" 115 | testutil.CreateObjects(ctx, cluster) 116 | }) 117 | AfterEach(func() { 118 | testutil.DeleteObjects(ctx, cluster) 119 | }) 120 | 121 | It("should not create service endpoint when FQDN is not resolved", func() { 122 | testutil.EnsureRuntimeObjectMatchExpectation(ctx, client.ObjectKey{ 123 | Name: serviceName, 124 | Namespace: ctx.Namespace, 125 | }, &corev1.Service{}, testutil.NOTFOUND) 126 | }) 127 | 128 | BeforeEach(func() { 129 | haprovider.QueryFQDN = func(fqdn string) (string, error) { 130 | return "10.1.2.1", nil 131 | } 132 | }) 133 | 134 | It("should create service and endpoint when FQDN is resolved", func() { 135 | service := &corev1.Service{} 136 | testutil.EnsureRuntimeObjectMatchExpectation(ctx, client.ObjectKey{ 137 | Name: serviceName, 138 | Namespace: ctx.Namespace, 139 | }, &corev1.Service{}, testutil.EXIST) 140 | 141 | err := ctx.Client.Get(ctx, client.ObjectKey{Name: serviceName, Namespace: ctx.Namespace}, service) 142 | Expect(err).ShouldNot(HaveOccurred()) 143 | Expect(service.Annotations[akoov1alpha1.AkoPreferredIPAnnotation]).Should(Equal("10.1.2.1")) 144 | // Simulate AKO updates the ip for service. 145 | service.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{{ 146 | IP: "10.1.2.1", 147 | }} 148 | err = ctx.Client.Status().Update(ctx, service) 149 | Expect(err).To(BeNil()) 150 | 151 | // Ensure service.Status.LoadBalancer.Ingress is not nil 152 | Eventually(func() bool { 153 | err := ctx.Client.Get(ctx, client.ObjectKey{Name: serviceName, Namespace: ctx.Namespace}, service) 154 | if err != nil { 155 | return false 156 | } 157 | if len(service.Status.LoadBalancer.Ingress) == 0 { 158 | return false 159 | } 160 | return true 161 | }) 162 | 163 | // Ensure updateControlPlaneEndpointToService won't use fqdn as ingress.ip 164 | Consistently(func() bool { 165 | err := ctx.Client.Get(ctx, client.ObjectKey{Name: serviceName, Namespace: ctx.Namespace}, service) 166 | if err != nil { 167 | return false 168 | } 169 | if len(service.Status.LoadBalancer.Ingress) == 0 { 170 | return false 171 | } 172 | if service.Status.LoadBalancer.Ingress[0].IP != "10.1.2.1" { 173 | return false 174 | } 175 | return true 176 | }, "30s", "3s").Should(BeTrue()) 177 | }) 178 | }) 179 | }) 180 | }) 181 | } 182 | -------------------------------------------------------------------------------- /controllers/cluster/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package cluster_test 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 11 | 12 | . "github.com/onsi/ginkgo" 13 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/controllers/cluster" 14 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/aviclient" 15 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 16 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 17 | corev1 "k8s.io/api/core/v1" 18 | "k8s.io/apimachinery/pkg/runtime" 19 | 20 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 21 | ctrl "sigs.k8s.io/controller-runtime" 22 | 23 | ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" 24 | ) 25 | 26 | // suite is used for unit and integration testing this controller. 
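// The builder wires in a fake AVI client and registers the Cluster reconciler with the manager; the cluster-api CRDs are loaded from the module path given below.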
27 | var suite = builder.NewTestSuiteForController( 28 | func(mgr ctrlmgr.Manager) error { 29 | 30 | builder.FakeAvi = aviclient.NewFakeAviClient() 31 | 32 | if err := (&cluster.ClusterReconciler{ 33 | Client: mgr.GetClient(), 34 | Log: ctrl.Log.WithName("controllers").WithName("Cluster"), 35 | Scheme: mgr.GetScheme(), 36 | }).SetupWithManager(mgr); err != nil { 37 | return err 38 | } 39 | return nil 40 | }, 41 | func(scheme *runtime.Scheme) (err error) { 42 | err = corev1.AddToScheme(scheme) 43 | if err != nil { 44 | return err 45 | } 46 | err = clusterv1.AddToScheme(scheme) 47 | if err != nil { 48 | return err 49 | } 50 | err = akoov1alpha1.AddToScheme(scheme) 51 | if err != nil { 52 | return err 53 | } 54 | return nil 55 | }, 56 | filepath.Join(testutil.FindModuleDir("sigs.k8s.io/cluster-api"), "config", "crd", "bases"), 57 | ) 58 | 59 | func TestController(t *testing.T) { 60 | suite.Register(t, "AKO Operator Cluster Controller", intgTests, unitTests) 61 | } 62 | 63 | var _ = BeforeSuite(suite.BeforeSuite) 64 | 65 | var _ = AfterSuite(suite.AfterSuite) 66 | 67 | func intgTests() { 68 | Describe("ClusterController Test", intgTestEnsureClusterHAProvider) 69 | } 70 | 71 | func unitTests() { 72 | } 73 | -------------------------------------------------------------------------------- /controllers/controllers.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package controllers 5 | 6 | import ( 7 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/controllers/akodeploymentconfig" 8 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/controllers/cluster" 9 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/controllers/machine" 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | ) 12 | 13 | func SetupReconcilers(mgr ctrl.Manager) error { 14 | if err := (&machine.MachineReconciler{ 15 | Client: mgr.GetClient(), 16 | Log: ctrl.Log.WithName("controllers").WithName("Machine"), 17 | Scheme: mgr.GetScheme(), 18 | }).SetupWithManager(mgr); err != nil { 19 | return err 20 | } 21 | 22 | if err := (&akodeploymentconfig.AKODeploymentConfigReconciler{ 23 | Client: mgr.GetClient(), 24 | Log: ctrl.Log.WithName("controllers").WithName("AKODeploymentConfig"), 25 | Scheme: mgr.GetScheme(), 26 | }).SetupWithManager(mgr); err != nil { 27 | return err 28 | } 29 | if err := (&cluster.ClusterReconciler{ 30 | Client: mgr.GetClient(), 31 | Log: ctrl.Log.WithName("controllers").WithName("Cluster"), 32 | Scheme: mgr.GetScheme(), 33 | }).SetupWithManager(mgr); err != nil { 34 | return err 35 | } 36 | return nil 37 | } 38 | -------------------------------------------------------------------------------- /controllers/machine/machine_controller_intg_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package machine_test 5 | 6 | import ( 7 | "os" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . 
"github.com/onsi/gomega" 11 | ako_operator "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/ako-operator" 12 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 13 | apierrors "k8s.io/apimachinery/pkg/api/errors" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | 16 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 17 | corev1 "k8s.io/api/core/v1" 18 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 19 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 20 | ) 21 | 22 | func intgTestMachineController() { 23 | var ( 24 | ctx *builder.IntegrationTestContext 25 | cluster *clusterv1.Cluster 26 | staticCluster *clusterv1.Cluster 27 | staticMachine *clusterv1.Machine 28 | machine *clusterv1.Machine 29 | testLabels map[string]string 30 | err error 31 | ) 32 | 33 | staticCluster = &clusterv1.Cluster{ 34 | ObjectMeta: metav1.ObjectMeta{ 35 | Name: "test", 36 | Namespace: "default", 37 | }, 38 | Spec: clusterv1.ClusterSpec{}, 39 | } 40 | 41 | staticMachine = &clusterv1.Machine{ 42 | ObjectMeta: metav1.ObjectMeta{ 43 | Name: "test-machine", 44 | Namespace: "default", 45 | Labels: map[string]string{ 46 | "cluster.x-k8s.io/cluster-name": "test", 47 | "cluster.x-k8s.io/control-plane": "", 48 | }, 49 | Annotations: map[string]string{ 50 | "pre-terminate.delete.hook.machine.cluster.x-k8s.io/avi-cleanup": "ako-operator", 51 | }, 52 | }, 53 | Spec: clusterv1.MachineSpec{ 54 | ClusterName: "test", 55 | }, 56 | } 57 | 58 | BeforeEach(func() { 59 | ctx = suite.NewIntegrationTestContext() 60 | cluster = staticCluster.DeepCopy() 61 | machine = staticMachine.DeepCopy() 62 | testLabels = map[string]string{ 63 | "networking.tkg.tanzu.vmware.com/avi": "", 64 | "tkg.tanzu.vmware.com/cluster-name": "test", 65 | } 66 | }) 67 | AfterEach(func() { 68 | ctx.AfterEach() 69 | ctx = nil 70 | }) 71 | 72 | When("A Cluster is created", func() { 73 | BeforeEach(func() { 74 | cluster.Labels = testLabels 75 | testutil.CreateObjects(ctx, cluster, machine) 76 | }) 77 | 78 | AfterEach(func() { 79 | testutil.DeleteObjects(ctx, cluster, machine) 80 | }) 81 | 82 | When("AVI is HA Provider", func() { 83 | JustBeforeEach(func() { 84 | err = os.Setenv(ako_operator.IsControlPlaneHAProvider, "True") 85 | Expect(err).ShouldNot(HaveOccurred()) 86 | machine.Status = clusterv1.MachineStatus{ 87 | Addresses: []clusterv1.MachineAddress{{ 88 | Address: "1.1.1.1", 89 | Type: clusterv1.MachineExternalIP, 90 | }}, 91 | } 92 | testutil.UpdateObjectsStatus(ctx, machine) 93 | }) 94 | It("Corresponding Endpoints should be created", func() { 95 | ep := &corev1.Endpoints{} 96 | Eventually(func() int { 97 | err := ctx.Client.Get(ctx.Context, client.ObjectKey{Name: cluster.Namespace + "-" + cluster.Name + "-control-plane", Namespace: cluster.Namespace}, ep) 98 | if err != nil { 99 | return 0 100 | } 101 | if len(ep.Subsets) == 0 { 102 | return 0 103 | } 104 | return len(ep.Subsets[0].Addresses) 105 | }).Should(Equal(1)) 106 | Expect(ep.Subsets[0].Addresses[0].IP).Should(Equal("1.1.1.1")) 107 | }) 108 | It("Should add one more machine", func() { 109 | secondMachine := staticMachine.DeepCopy() 110 | secondMachine.Name = "test-machine-2" 111 | secondMachine.Namespace = cluster.Namespace 112 | testutil.CreateObjects(ctx, secondMachine) 113 | secondMachine.Status = clusterv1.MachineStatus{ 114 | Addresses: []clusterv1.MachineAddress{{ 115 | Address: "1.1.1.2", 116 | Type: clusterv1.MachineExternalIP, 117 | }}, 118 | } 119 | testutil.UpdateObjectsStatus(ctx, secondMachine) 120 | 
121 | ep := &corev1.Endpoints{} 122 | Eventually(func() bool { 123 | err := ctx.Client.Get(ctx.Context, client.ObjectKey{Name: cluster.Namespace + "-" + cluster.Name + "-control-plane", Namespace: cluster.Namespace}, ep) 124 | return err == nil 125 | }).Should(BeTrue()) 126 | Expect(ep.Subsets).ShouldNot(BeNil()) 127 | Expect(ep.Subsets[0].Addresses).ShouldNot(BeNil()) 128 | testutil.DeleteObjects(ctx, secondMachine) 129 | }) 130 | }) 131 | 132 | It("Delete one machine directly", func() { 133 | testutil.DeleteObjects(ctx, machine) 134 | Eventually(func() bool { 135 | err := ctx.Client.Get(ctx.Context, client.ObjectKey{Name: machine.Name, Namespace: machine.Namespace}, &clusterv1.Machine{}) 136 | return apierrors.IsNotFound(err) 137 | }).Should(BeTrue()) 138 | }) 139 | }) 140 | } 141 | -------------------------------------------------------------------------------- /controllers/machine/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package machine_test 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | . "github.com/onsi/ginkgo" 11 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 12 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/controllers/machine" 13 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/aviclient" 14 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 15 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 16 | corev1 "k8s.io/api/core/v1" 17 | "k8s.io/apimachinery/pkg/runtime" 18 | 19 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 20 | ctrl "sigs.k8s.io/controller-runtime" 21 | 22 | ctrlmgr "sigs.k8s.io/controller-runtime/pkg/manager" 23 | ) 24 | 25 | // suite is used for unit and integration testing this controller. 26 | var suite = builder.NewTestSuiteForController( 27 | func(mgr ctrlmgr.Manager) error { 28 | 29 | builder.FakeAvi = aviclient.NewFakeAviClient() 30 | 31 | if err := (&machine.MachineReconciler{ 32 | Client: mgr.GetClient(), 33 | Log: ctrl.Log.WithName("controllers").WithName("Machine"), 34 | Scheme: mgr.GetScheme(), 35 | }).SetupWithManager(mgr); err != nil { 36 | return err 37 | } 38 | return nil 39 | }, 40 | func(scheme *runtime.Scheme) (err error) { 41 | err = corev1.AddToScheme(scheme) 42 | if err != nil { 43 | return err 44 | } 45 | err = clusterv1.AddToScheme(scheme) 46 | if err != nil { 47 | return err 48 | } 49 | err = akoov1alpha1.AddToScheme(scheme) 50 | if err != nil { 51 | return err 52 | } 53 | return nil 54 | }, 55 | filepath.Join(testutil.FindModuleDir("sigs.k8s.io/cluster-api"), "config", "crd", "bases"), 56 | ) 57 | 58 | func TestController(t *testing.T) { 59 | suite.Register(t, "AKO Operator Machine Controller", intgTests, unitTests) 60 | } 61 | 62 | var _ = BeforeSuite(suite.BeforeSuite) 63 | 64 | var _ = AfterSuite(suite.AfterSuite) 65 | 66 | func intgTests() { 67 | Describe("MachineController Test", intgTestMachineController) 68 | } 69 | 70 | func unitTests() { 71 | } 72 | -------------------------------------------------------------------------------- /controllers/tests/cluster_for_akodeploymentconfig/default_adc/cluster_for_akodeploymentconfig_intg_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 VMware, Inc. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package default_adc_test 5 | 6 | import ( 7 | . "github.com/onsi/ginkgo" 8 | . "github.com/onsi/gomega" 9 | v1 "k8s.io/api/core/v1" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | 14 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 15 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 16 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 17 | ) 18 | 19 | func intgTestClusterDisableAVIWithoutAnyADC() { 20 | var ( 21 | ctx *builder.IntegrationTestContext 22 | staticCluster *clusterv1.Cluster 23 | ) 24 | 25 | BeforeEach(func() { 26 | ctx = suite.NewIntegrationTestContext() 27 | staticCluster = testutil.GetDefaultCluster() 28 | }) 29 | 30 | When("there is no ADC and a cluster is created", func() { 31 | BeforeEach(func() { 32 | testutil.CreateObjects(ctx, staticCluster.DeepCopy()) 33 | }) 34 | AfterEach(func() { 35 | testutil.DeleteObjects(ctx, staticCluster.DeepCopy()) 36 | testutil.EnsureRuntimeObjectMatchExpectation(ctx, client.ObjectKey{ 37 | Name: staticCluster.Name, 38 | Namespace: staticCluster.Namespace, 39 | }, &clusterv1.Cluster{}, testutil.NOTFOUND) 40 | }) 41 | It("shouldn't have 'networking.tkg.tanzu.vmware.com/avi'", func() { 42 | testutil.EnsureClusterAviLabelExists(ctx, client.ObjectKey{ 43 | Name: staticCluster.Name, 44 | Namespace: staticCluster.Namespace, 45 | }, akoov1alpha1.AviClusterLabel, false) 46 | }) 47 | }) 48 | } 49 | 50 | func intgTestClusterCanBeSelectedByADC() { 51 | var ( 52 | ctx *builder.IntegrationTestContext 53 | 54 | staticCluster *clusterv1.Cluster 55 | staticAkoDeploymentConfig *akoov1alpha1.AKODeploymentConfig 56 | staticDefaultAkoDeploymentConfig *akoov1alpha1.AKODeploymentConfig 57 | 58 | staticManagementNamespace *v1.Namespace 59 | staticManagementCluster *clusterv1.Cluster 60 | staticManagementAkoDeploymentConfig *akoov1alpha1.AKODeploymentConfig 61 | ) 62 | 63 | BeforeEach(func() { 64 | ctx = suite.NewIntegrationTestContext() 65 | staticCluster = testutil.GetDefaultCluster() 66 | staticAkoDeploymentConfig = testutil.GetCustomizedADC(testutil.CustomizedADCLabels) 67 | staticDefaultAkoDeploymentConfig = testutil.GetDefaultADC() 68 | 69 | staticManagementNamespace = &v1.Namespace{ 70 | ObjectMeta: metav1.ObjectMeta{Name: "tkg-system"}, 71 | } 72 | staticManagementCluster = testutil.GetManagementCluster() 73 | staticManagementAkoDeploymentConfig = testutil.GetManagementADC() 74 | }) 75 | 76 | When("both default and customized ADC exist", func() { 77 | 78 | BeforeEach(func() { 79 | testutil.CreateObjects(ctx, staticAkoDeploymentConfig.DeepCopy()) 80 | testutil.CreateObjects(ctx, staticDefaultAkoDeploymentConfig.DeepCopy()) 81 | testutil.CreateObjects(ctx, staticCluster.DeepCopy()) 82 | }) 83 | 84 | It("labels the cluster dynamically", func() { 85 | By("labels with 'networking.tkg.tanzu.vmware.com/avi: install-ako-for-all'", func() { 86 | testutil.EnsureClusterAviLabelMatchExpectation(ctx, client.ObjectKey{ 87 | Name: staticCluster.Name, 88 | Namespace: staticCluster.Namespace, 89 | }, akoov1alpha1.AviClusterLabel, staticDefaultAkoDeploymentConfig.Name) 90 | }) 91 | 92 | By("add cluster label to use customized adc") 93 | testutil.UpdateObjectLabels(ctx, client.ObjectKey{ 94 | Name: staticCluster.Name, 95 | Namespace: staticCluster.Namespace, 96 | }, testutil.CustomizedADCLabels) 97 | 98 
| By("labels with 'networking.tkg.tanzu.vmware.com/avi: ako-deployment-config'", func() { 99 | testutil.EnsureClusterAviLabelMatchExpectation(ctx, client.ObjectKey{ 100 | Name: staticCluster.Name, 101 | Namespace: staticCluster.Namespace, 102 | }, akoov1alpha1.AviClusterLabel, staticAkoDeploymentConfig.Name) 103 | }) 104 | 105 | By("create another customized ako-deployment-config2") 106 | anotherAkoDeploymentConfig := staticAkoDeploymentConfig.DeepCopy() 107 | anotherAkoDeploymentConfig.Name = "ako-deployment-config-2" 108 | testutil.CreateObjects(ctx, anotherAkoDeploymentConfig.DeepCopy()) 109 | 110 | By("cluster should keep its label, even through another custom ADC matches the name. a.k.a it won't override", func() { 111 | Consistently(func() bool { 112 | obj := &clusterv1.Cluster{} 113 | err := ctx.Client.Get(ctx.Context, client.ObjectKey{ 114 | Name: staticCluster.Name, 115 | Namespace: staticCluster.Namespace, 116 | }, obj) 117 | if err != nil { 118 | return false 119 | } 120 | val, ok := obj.Labels[akoov1alpha1.AviClusterLabel] 121 | return ok && val == staticAkoDeploymentConfig.Name 122 | }) 123 | }) 124 | 125 | By("unset cluster label to use default adc") 126 | testutil.UpdateObjectLabels(ctx, client.ObjectKey{ 127 | Name: staticCluster.Name, 128 | Namespace: staticCluster.Namespace, 129 | }, map[string]string{}) 130 | 131 | By("labels with 'networking.tkg.tanzu.vmware.com/avi: install-ako-for-all'", func() { 132 | testutil.EnsureClusterAviLabelMatchExpectation(ctx, client.ObjectKey{ 133 | Name: staticCluster.Name, 134 | Namespace: staticCluster.Namespace, 135 | }, akoov1alpha1.AviClusterLabel, staticDefaultAkoDeploymentConfig.Name) 136 | }) 137 | }) 138 | }) 139 | 140 | When("management ADC exists", func() { 141 | BeforeEach(func() { 142 | testutil.CreateObjects(ctx, staticManagementNamespace.DeepCopy()) 143 | testutil.CreateObjects(ctx, staticManagementAkoDeploymentConfig.DeepCopy()) 144 | testutil.CreateObjects(ctx, staticManagementCluster.DeepCopy()) 145 | }) 146 | 147 | It("labels the management cluster", func() { 148 | By("labels with 'networking.tkg.tanzu.vmware.com/avi: install-ako-for-management-cluster'", func() { 149 | testutil.EnsureClusterAviLabelMatchExpectation(ctx, client.ObjectKey{ 150 | Name: staticManagementCluster.Name, 151 | Namespace: staticManagementCluster.Namespace, 152 | }, akoov1alpha1.AviClusterLabel, staticManagementAkoDeploymentConfig.Name) 153 | }) 154 | }) 155 | }) 156 | } 157 | -------------------------------------------------------------------------------- /controllers/tests/cluster_for_akodeploymentconfig/default_adc/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package default_adc_test 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | . 
"github.com/onsi/ginkgo" 11 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 12 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/funcs" 13 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 14 | ) 15 | 16 | // suite is used for testing the interactions between the controllers 17 | var suite = builder.NewTestSuiteForController( 18 | funcs.AddAKODeploymentConfigAndClusterControllerToMgrFunc, 19 | funcs.AddAllToSchemeFunc, 20 | filepath.Join(testutil.FindModuleDir("sigs.k8s.io/cluster-api"), "config", "crd", "bases"), 21 | filepath.Join(testutil.FindModuleDir("github.com/vmware/load-balancer-and-ingress-services-for-kubernetes"), "helm", "ako", "crds"), 22 | ) 23 | 24 | var _ = BeforeSuite(suite.BeforeSuite) 25 | 26 | var _ = AfterSuite(suite.AfterSuite) 27 | 28 | func TestController(t *testing.T) { 29 | suite.Register(t, "AKO Operator Controllers", intgTests, unitTests) 30 | } 31 | 32 | func intgTests() { 33 | Describe("Cluster not selected", intgTestClusterDisableAVIWithoutAnyADC) 34 | Describe("Cluster selected by ADC", intgTestClusterCanBeSelectedByADC) 35 | } 36 | 37 | func unitTests() { 38 | } 39 | -------------------------------------------------------------------------------- /controllers/tests/cluster_for_akodeploymentconfig/default_adc_non_empty_selectors/cluster_for_akodeploymentconfig_intg_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package default_adc_non_empty_selectors 5 | 6 | import ( 7 | . "github.com/onsi/ginkgo" 8 | . "github.com/onsi/gomega" 9 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | 12 | akoov1alpha1 "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/api/v1alpha1" 13 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 14 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 15 | ) 16 | 17 | func intgTestCanSelectedByDefaultADCWithNonEmptySelectors() { 18 | var ( 19 | ctx *builder.IntegrationTestContext 20 | 21 | labels map[string]string 22 | staticCluster *clusterv1.Cluster 23 | staticAkoDeploymentConfig *akoov1alpha1.AKODeploymentConfig 24 | staticDefaultAkoDeploymentConfig *akoov1alpha1.AKODeploymentConfig 25 | ) 26 | 27 | BeforeEach(func() { 28 | ctx = suite.NewIntegrationTestContext() 29 | 30 | labels = map[string]string{ 31 | "foo": "bar", 32 | } 33 | staticCluster = testutil.GetDefaultCluster() 34 | staticAkoDeploymentConfig = testutil.GetCustomizedADC(labels) 35 | staticDefaultAkoDeploymentConfig = testutil.GetDefaultADC() 36 | }) 37 | 38 | When("install-ako-for-all has non-empty cluster selector", func() { 39 | BeforeEach(func() { 40 | staticDefaultAkoDeploymentConfig.Spec.ClusterSelector.MatchLabels = labels 41 | testutil.CreateObjects(ctx, staticDefaultAkoDeploymentConfig.DeepCopy()) 42 | }) 43 | 44 | // default adc with non-empty selector -> 45 | // create a cluster selected by default adc -> 46 | // create a new adc also can select cluster -> expect cluster should not change the adc 47 | It("labels the cluster", func() { 48 | By("create a cluster selected by default ADC") 49 | staticCluster.Labels = labels 50 | testutil.CreateObjects(ctx, staticCluster.DeepCopy()) 51 | 52 | By("labels with 'networking.tkg.tanzu.vmware.com/avi: install-ako-for-all'", func() { 53 | 
testutil.EnsureClusterAviLabelMatchExpectation(ctx, client.ObjectKey{ 54 | Name: staticCluster.Name, 55 | Namespace: staticCluster.Namespace, 56 | }, akoov1alpha1.AviClusterLabel, staticDefaultAkoDeploymentConfig.Name) 57 | }) 58 | 59 | By("create another ADC with same selector") 60 | testutil.CreateObjects(ctx, staticAkoDeploymentConfig.DeepCopy()) 61 | 62 | By("cluster should keep its labels, since the default ADC cannot be overridden if it has a non-empty selector", func() { 63 | Consistently(func() bool { 64 | obj := &clusterv1.Cluster{} 65 | err := ctx.Client.Get(ctx.Context, client.ObjectKey{ 66 | Name: staticCluster.Name, 67 | Namespace: staticCluster.Namespace, 68 | }, obj) 69 | if err != nil { 70 | return false 71 | } 72 | val, ok := obj.Labels[akoov1alpha1.AviClusterLabel] 73 | return ok && val == staticDefaultAkoDeploymentConfig.Name 74 | }).Should(BeTrue()) 75 | }) 76 | }) 77 | }) 78 | } 79 | -------------------------------------------------------------------------------- /controllers/tests/cluster_for_akodeploymentconfig/default_adc_non_empty_selectors/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2022 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package default_adc_non_empty_selectors 5 | 6 | import ( 7 | "path/filepath" 8 | "testing" 9 | 10 | . "github.com/onsi/ginkgo" 11 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/builder" 12 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/funcs" 13 | testutil "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/test/util" 14 | ) 15 | 16 | // suite is used for testing the interactions between the controllers 17 | var suite = builder.NewTestSuiteForController( 18 | funcs.AddAKODeploymentConfigAndClusterControllerToMgrFunc, 19 | funcs.AddAllToSchemeFunc, 20 | filepath.Join(testutil.FindModuleDir("sigs.k8s.io/cluster-api"), "config", "crd", "bases"), 21 | filepath.Join(testutil.FindModuleDir("github.com/vmware/load-balancer-and-ingress-services-for-kubernetes"), "helm", "ako", "crds"), 22 | ) 23 | 24 | var _ = BeforeSuite(suite.BeforeSuite) 25 | 26 | var _ = AfterSuite(suite.AfterSuite) 27 | 28 | func TestControllerWithNonEmptyDefaultADC(t *testing.T) { 29 | suite.Register(t, "AKO Operator Controllers with non-empty selector install-ako-for-all", intgTests, unitTests) 30 | } 31 | 32 | func intgTests() { 33 | Describe("Cluster selected by default ADC with non-empty selectors", intgTestCanSelectedByDefaultADCWithNonEmptySelectors) 34 | } 35 | 36 | func unitTests() { 37 | } 38 | -------------------------------------------------------------------------------- /e2e/README.md: -------------------------------------------------------------------------------- 1 | # How to run e2e tests against a testbed 2 | 3 | 1. Get a testbed from the pipeline 4 | 1. After a successful build, download the artifacts 5 | 1. Unzip the package 6 | 7 | ```bash 8 | # You should see something similar 9 | ➜ ls 10 | archive archive.zip 11 | 12 | ➜ cd archive 13 | ➜ ls 14 | akodeploymentconfig.yaml tkg-cluster-mc-113_config tkgversion.txt log 15 | 16 | ➜ export ARCHIVE_PATH=$PWD 17 | ``` 18 | 19 | 1. Copy the following files over 20 | 21 | ```bash 22 | # Copy the management cluster kubeconfig 23 | # Assume: 24 | # 1. ARCHIVE_PATH environment variable saves the absolute path to the 25 | # unzipped artifacts; 26 | # 2.
AKO_OPERATOR_PATH environment variable saves the absolute path to the 27 | # ako operator repo; 28 | 29 | cp ${ARCHIVE_PATH}/tkg-cluster-mc-[this matches your build number]_config ${AKO_OPERATOR_PATH}/e2e/static/mc.kubeconfig 30 | cp ${ARCHIVE_PATH}/.tkg/config.yaml ${AKO_OPERATOR_PATH}/e2e/static/tkg-config.yaml 31 | cp ${ARCHIVE_PATH}/akodeploymentconfig.yaml ${AKO_OPERATOR_PATH}/e2e/static/akodeploymentconfig.yaml 32 | ``` 33 | 34 | 1. Update the tkg.regions.file field in the tkg config file 35 | 36 | ```bash 37 | # 1. open ${AKO_OPERATOR_PATH}/e2e/static/tkg-config.yaml 38 | # 2. find the `tkg` section similar to the following 39 | tkg: 40 | regions: 41 | - name: tkg-cluster-mc-113 42 | context: tkg-cluster-mc-113-admin@tkg-cluster-mc-113 43 | file: [here!!!] 44 | status: Success 45 | isCurrentContext: false 46 | current-region-context: tkg-cluster-mc-113-admin@tkg-cluster-mc-113 47 | 48 | # 3. change the value of the file field to the absolute path, i.e. ${AKO_OPERATOR_PATH}/e2e/static/mc.kubeconfig 49 | ``` 50 | 51 | 1. Open ${AKO_OPERATOR_PATH}/e2e/env.json 52 | 1. Update env.mc-kubeconfig.context to the management cluster's context. Usually it's in the 53 | form of "tkg-cluster-mc-113-admin@tkg-cluster-mc-113" 54 | 1. Update env.worker to the testbed's worker IP, which can be found in 55 | ${ARCHIVE_PATH}/vc.txt. It's the IP used in STATIC_IP_SERVICE_ENDPOINT 56 | 57 | 1. Run the e2e tests 58 | 59 | ```bash 60 | hack/run-e2e.sh 61 | ``` 62 | -------------------------------------------------------------------------------- /e2e/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package e2e 5 | -------------------------------------------------------------------------------- /e2e/e2e_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package e2e 5 | 6 | import ( 7 | "fmt" 8 | 9 | .
"github.com/onsi/ginkgo" 10 | 11 | testenv "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/e2e/pkg/env" 12 | ) 13 | 14 | var _ = Describe("AKODeploymentConfig with selector", func() { 15 | var ( 16 | testName = "AKODeploymentConfig with selector" 17 | testcase *testenv.E2ETestCase 18 | skip bool 19 | ) 20 | 21 | BeforeEach(func() { 22 | By("checking if test should be skipped") 23 | skip, testcase = testenv.LoadTestCase(testName) 24 | if skip { 25 | Skip(fmt.Sprintf("Skip test case :[%s]", testName)) 26 | } else { 27 | GinkgoT().Logf("Running test case: [%s]", testName) 28 | } 29 | By("ensuring AKODeploymentConfig is applied") 30 | testcase.EnsureYamlsApplied([]string{ 31 | testcase.AKODeploymentConfig.Path, 32 | }) 33 | By("running sanity checks") 34 | testcase.SanityCheck() 35 | By("creating test case specific namespace") 36 | testcase.Init() 37 | }) 38 | 39 | AfterEach(func() { 40 | if skip { 41 | Skip(fmt.Sprintf("Skip cleaning up test case :[%s]", testName)) 42 | } else { 43 | GinkgoT().Logf("Cleaning up test case: [%s]", testName) 44 | } 45 | By("ensuring AKODeploymentConfig is deleted") 46 | testcase.EnsureYamlsRemoved([]string{ 47 | testcase.AKODeploymentConfig.Path, 48 | }) 49 | By("tearing down test case specific namespace") 50 | testcase.Teardown() 51 | }) 52 | 53 | When("one cluster is newly created without the specified label", func() { 54 | var ( 55 | clusterName string 56 | ) 57 | 58 | BeforeEach(func() { 59 | By("creating a TKG Workload Cluster") 60 | clusterName = testenv.GenerateRandomName() 61 | testcase.EnsureClusterCreated(clusterName) 62 | }) 63 | 64 | AfterEach(func() { 65 | By("deleting a TKG Workload Cluster") 66 | testcase.EnsureClusterDeleted(clusterName) 67 | By("ensuring CRS and Avi User in Management Cluster is gone") 68 | testcase.EnsureCRSandAviUserDeleted(clusterName) 69 | By("ensuring Avi Resources is gone") 70 | testcase.EnsureAviResourcesDeleted(clusterName) 71 | }) 72 | 73 | When("it's later applied with the specified label", func() { 74 | BeforeEach(func() { 75 | By("ensuring cluster has the correct labels applied") 76 | testcase.EnsureClusterLabelApplied( 77 | clusterName, testenv.AKODeploymentConfigLabelsGetter(testcase), 78 | ) 79 | }) 80 | It("should be managed by AKO Operator", func() { 81 | By("ensuring AKO is successfully installed in the workload cluster") 82 | testenv.EnsureAKO(testcase, clusterName) 83 | By("ensuring Load Balancer Type SVC works as expected") 84 | testenv.EnsureLoadBalancerService(testcase, clusterName) 85 | }) 86 | }) 87 | }) 88 | }) 89 | -------------------------------------------------------------------------------- /e2e/env.json: -------------------------------------------------------------------------------- 1 | { 2 | "env": { 3 | "tkg-config": "./static/tkg-config.yaml", 4 | "mc-kubeconfig": { 5 | "path": "./static/mc.kubeconfig", 6 | "context": "[replace with your testbed management cluster context]" 7 | }, 8 | "worker": "[replace with your testbed static ip service endpoint]" 9 | }, 10 | "tests": [ 11 | { 12 | "name": "AKODeploymentConfig with selector", 13 | "akoDeploymentConfig": { 14 | "path": "./static/akodeploymentconfig.yaml", 15 | "name": "ako-deployment-config" 16 | }, 17 | "yamls": [ 18 | { 19 | "path": "./static/loadbalancer-service.yaml" 20 | } 21 | ] 22 | } 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /e2e/pkg/env/assertions.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, 
Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package env 5 | 6 | import ( 7 | "fmt" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . "github.com/onsi/gomega" 11 | "github.com/onsi/gomega/gexec" 12 | ) 13 | 14 | func ShowThePlan() { 15 | GinkgoT().Logf("\nRunning AVI End To End Tests\n") 16 | } 17 | 18 | func (o *E2ETestCase) SanityCheck() { 19 | By("Ensure AKO Operator running") 20 | EnsurePodRunning(o.Clients.Kubectl, "ako-operator-controller-manager", 1, "tkg-system-networking") 21 | } 22 | 23 | func (o *E2ETestCase) EnsureYamlsApplied(yamlPaths []string) { 24 | for _, path := range yamlPaths { 25 | Eventually(o.Clients.Kubectl.RunWithoutNamespace("apply", "-f", path), "5s").Should(gexec.Exit()) 26 | } 27 | } 28 | 29 | func (o *E2ETestCase) EnsureYamlsRemoved(yamlPaths []string) { 30 | for _, path := range yamlPaths { 31 | Eventually(o.Clients.Kubectl.RunWithoutNamespace("delete", "-f", path), "5s").Should(gexec.Exit()) 32 | } 33 | } 34 | 35 | func (o *E2ETestCase) EnsureClusterCreated(name string) { 36 | if !ClusterExists(o.Clients.TKGCli, name) { 37 | By("Allocating an VIP") 38 | vip, err := AllocVIP(o.Clients.VIP) 39 | Expect(err).NotTo(HaveOccurred()) 40 | By(fmt.Sprintf("creating the cluster with VIP: %s", vip)) 41 | CreateCluster(o.Clients.TKGCli, name, vip) 42 | GetClusterCredential(o.Clients.TKGCli, name) 43 | } 44 | By("ensuring the cluster is running") 45 | EnsureClusterStatus(o.Clients.TKGCli, name, "running") 46 | } 47 | 48 | func (o *E2ETestCase) EnsureClusterDeleted(name string) { 49 | runner := o.Clients.TKGCli 50 | if ClusterExists(runner, name) { 51 | By("Deleting the cluster") 52 | DeleteCluster(runner, name) 53 | By("ensuring the cluster is gone") 54 | EnsureClusterGone(runner, name) 55 | } 56 | } 57 | 58 | func (o *E2ETestCase) EnsureClusterLabelApplied(clusterName string, labelGetter labelGetter) { 59 | labels := labelGetter() 60 | for k, v := range labels { 61 | By(fmt.Sprintf("Applying labels k:%s, v:%s to cluster %s", k, v, clusterName)) 62 | ApplyLabelOnCluster(o.Clients.Kubectl, clusterName, k, v) 63 | } 64 | EnsureClusterHasLabels(o.Clients.Kubectl, clusterName, labels) 65 | } 66 | 67 | func EnsureAKO(testcase *E2ETestCase, clusterName string) { 68 | wcRunner := NewKubectlRunner( 69 | // by setting kubeConfigPath to empty, use $HOME/.kube/config by default 70 | "", 71 | fmt.Sprintf("%s-admin@%s", clusterName, clusterName), 72 | testcase.Clients.Kubectl.Namespace) 73 | EnsurePodRunningWithTimeout(wcRunner, "ako-0", 1, "avi-system", "180s") 74 | } 75 | 76 | func EnsureLoadBalancerService(testcase *E2ETestCase, clusterName string) { 77 | var paths []string 78 | for _, p := range testcase.YAMLs { 79 | paths = append(paths, p.Path) 80 | } 81 | wcRunner := NewKubectlRunner( 82 | // by setting kubeConfigPath to empty, use $HOME/.kube/config by default 83 | "", 84 | fmt.Sprintf("%s-admin@%s", clusterName, clusterName), 85 | testcase.Clients.Kubectl.Namespace) 86 | EnsureYamlsApplied(wcRunner, paths) 87 | EnsureLoadBalancerTypeServiceAccessible(wcRunner, 1) 88 | } 89 | 90 | func (o *E2ETestCase) EnsureCRSandAviUserDeleted(clusterName string) { 91 | // Check crs 92 | EnsureObjectGone(o.Clients.Kubectl, "secret", clusterName+"-ako") 93 | EnsureObjectGone(o.Clients.Kubectl, "ClusterResourceSet", clusterName+"-ako") 94 | // Check Avi user 95 | EnsureObjectGone(o.Clients.Kubectl, "secret", clusterName+"-avi-credentials") 96 | } 97 | 98 | func (o *E2ETestCase) EnsureAviResourcesDeleted(clusterName string) { 99 | o.Clients.Avi = NewAviRunner(o.Clients.Kubectl) 100 | // Avi 
Resources are regarded as deleted if VirtualService and Pool are deleted 101 | EnsureAviObjectDeleted(o.Clients.Avi, clusterName, "virtualservice") 102 | EnsureAviObjectDeleted(o.Clients.Avi, clusterName, "pool") 103 | } 104 | -------------------------------------------------------------------------------- /e2e/pkg/env/avi.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package env 5 | 6 | import ( 7 | "strings" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . "github.com/onsi/gomega" 11 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/aviclient" 12 | ) 13 | 14 | func NewAviRunner(runner *KubectlRunner) aviclient.Client { 15 | 16 | aviClient, _ := aviclient.NewAviClient(&aviclient.AviClientConfig{ 17 | ServerIP: GetAviObject(runner, "akodeploymentconfig", "ako-deployment-config", "spec", "controller"), 18 | Username: GetAviObject(runner, "secret", "controller-credentials", "data", "username"), 19 | Password: GetAviObject(runner, "secret", "controller-credentials", "data", "password"), 20 | CA: GetAviObject(runner, "secret", "controller-ca", "data", "certificateAuthorityData"), 21 | }, "") 22 | 23 | return aviClient 24 | } 25 | 26 | func EnsureAviObjectDeleted(aviClient aviclient.Client, clusterName string, obj string) { 27 | Eventually(func() bool { 28 | var err error 29 | 30 | switch obj { 31 | case "virtualservice": 32 | _, err = aviClient.VirtualServiceGetByName(clusterName + "--default-static-ip") 33 | case "pool": 34 | _, err = aviClient.PoolGetByName(clusterName + "--default-static-ip--80") 35 | default: 36 | GinkgoT().Logf("EnsureAviObjectDeleted function doesn't support checking " + obj) 37 | return false 38 | } 39 | 40 | if err != nil { 41 | if strings.Contains(err.Error(), "No object of type "+obj) { 42 | GinkgoT().Logf("No object of type " + obj + " with name " + clusterName + " is found") 43 | return true 44 | } 45 | GinkgoT().Logf("Avi Client query error:" + err.Error()) 46 | return false 47 | } 48 | GinkgoT().Logf(obj + " with name " + clusterName + " is found unexpectedly, return false") 49 | return false 50 | }, "30s", "5s").Should(BeTrue()) 51 | } 52 | -------------------------------------------------------------------------------- /e2e/pkg/env/env.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package env 5 | 6 | import ( 7 | "encoding/json" 8 | "errors" 9 | "math/rand" 10 | "time" 11 | 12 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/aviclient" 13 | ) 14 | 15 | var testEnv TestEnvSpec 16 | 17 | func LoadTestEnv(path string) error { 18 | if NotExist(path) { 19 | return errors.New("path doesn't exist") 20 | } 21 | data, err := ReadFromFile(path) 22 | if err != nil { 23 | return err 24 | } 25 | err = json.Unmarshal(data, &testEnv) 26 | if err != nil { 27 | return err 28 | } 29 | return nil 30 | } 31 | 32 | // TestEnvSpec is the specification for an e2e tests which includes: 33 | // 1. the infra information where these tests will run; 34 | // 2. 
an array of test cases that will be run and their test-specific settings; 35 | type TestEnvSpec struct { 36 | Env Env 37 | Tests []TestCaseSpec 38 | } 39 | 40 | type Env struct { 41 | TKGConfig string `json:"tkg-config"` 42 | ManagementClusterKubeconfig Kubecontext `json:"mc-kubeconfig"` 43 | Worker string `json:"worker"` 44 | } 45 | 46 | type Kubecontext struct { 47 | Path string `json:"path"` 48 | Context string `json:"context"` 49 | } 50 | 51 | type TestCaseSpec struct { 52 | Name string `json:"name"` 53 | AKODeploymentConfig YamlTarget `json:"akoDeploymentConfig"` 54 | YAMLs []YamlTarget `json:"yamls"` 55 | } 56 | 57 | type YamlTarget struct { 58 | Path string `json:"path"` 59 | Name string `json:"name,omitempty"` 60 | } 61 | 62 | // E2ETestCase runs a test case in its own separate namespace 63 | type E2ETestCase struct { 64 | Clients Clients 65 | AKODeploymentConfig YamlTarget 66 | YAMLs []YamlTarget 67 | } 68 | 69 | type Clients struct { 70 | Kubectl *KubectlRunner 71 | TKGCli *TKGRunner 72 | VIP *VIPRunner 73 | Avi aviclient.Client 74 | } 75 | 76 | // Init initializes the namespace 77 | func (o *E2ETestCase) Init() { 78 | CreateNamespace(o.Clients.Kubectl) 79 | } 80 | 81 | // Teardown deletes the namespace 82 | func (o *E2ETestCase) Teardown() { 83 | DeleteNamespace(o.Clients.Kubectl) 84 | } 85 | 86 | type labelGetter func() map[string]string 87 | 88 | // LoadTestCase checks whether the test case is registered to run. 89 | // It takes one parameter: 90 | // 91 | // string: name of the testcase 92 | // 93 | // It returns two values: 94 | // 95 | // bool: if it's true, then the test case is not registered and should be skipped 96 | // *E2ETestCase: an encapsulation of a Test Case's env 97 | func LoadTestCase(name string) (bool, *E2ETestCase) { 98 | namespace := "akoo-e2e-" + GenerateRandomName() 99 | res := &E2ETestCase{ 100 | Clients: Clients{ 101 | Kubectl: NewKubectlRunner(testEnv.Env.ManagementClusterKubeconfig.Path, testEnv.Env.ManagementClusterKubeconfig.Context, namespace), 102 | TKGCli: NewTKGRunner(testEnv.Env.TKGConfig, namespace), 103 | VIP: NewVIPRunner(testEnv.Env.Worker), 104 | Avi: nil, 105 | }, 106 | } 107 | for _, test := range testEnv.Tests { 108 | if test.Name == name { 109 | res.AKODeploymentConfig = test.AKODeploymentConfig 110 | res.YAMLs = test.YAMLs 111 | return false, res 112 | } 113 | } 114 | return true, res 115 | } 116 | 117 | func GenerateRandomName() string { 118 | rand.Seed(time.Now().UnixNano()) 119 | letterRunes := []rune("abcdefghijklmnopqrstuvwxyz") 120 | b := make([]rune, 10) 121 | for i := range b { 122 | b[i] = letterRunes[rand.Intn(len(letterRunes))] 123 | } 124 | return string(b) 125 | } 126 | -------------------------------------------------------------------------------- /e2e/pkg/env/io.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package env 5 | 6 | import ( 7 | "io/ioutil" 8 | "os" 9 | ) 10 | 11 | func NotExist(path string) bool { 12 | _, err := os.Stat(path) 13 | return os.IsNotExist(err) 14 | } 15 | 16 | func ReadFromFile(file string) ([]byte, error) { 17 | data, err := ioutil.ReadFile(file) 18 | return data, err 19 | } 20 | -------------------------------------------------------------------------------- /e2e/pkg/env/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc.
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package env 5 | 6 | import ( 7 | "testing" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . "github.com/onsi/gomega" 11 | ) 12 | 13 | func TestHandlers(t *testing.T) { 14 | RegisterFailHandler(Fail) 15 | RunSpecs(t, "AKO Suite") 16 | } 17 | -------------------------------------------------------------------------------- /e2e/pkg/env/tkg.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package env 5 | 6 | import ( 7 | "os/exec" 8 | 9 | "github.com/bitly/go-simplejson" 10 | . "github.com/onsi/ginkgo" 11 | . "github.com/onsi/gomega" 12 | "github.com/onsi/gomega/gexec" 13 | ) 14 | 15 | type TKGRunner struct { 16 | ConfigPath string 17 | Namespace string 18 | } 19 | 20 | func NewTKGRunner(configPath, namespace string) *TKGRunner { 21 | return &TKGRunner{ 22 | ConfigPath: configPath, 23 | Namespace: namespace, 24 | } 25 | } 26 | 27 | func (runner *TKGRunner) RunWithArgs(args ...string) *gexec.Session { 28 | command := exec.Command("tkg", args...) 29 | session, err := gexec.Start(command, GinkgoWriter, GinkgoWriter) 30 | Expect(err).NotTo(HaveOccurred()) 31 | GinkgoT().Log("running tkg cli with args", args) 32 | return session 33 | } 34 | 35 | func (runner *TKGRunner) RunWithNamespace(namespace string, args ...string) *gexec.Session { 36 | newArgs := append([]string{"--config", runner.ConfigPath, "--namespace", namespace}, args...) 37 | return runner.RunWithArgs(newArgs...) 38 | } 39 | 40 | func (runner *TKGRunner) Run(args ...string) *gexec.Session { 41 | newArgs := append([]string{"--config", runner.ConfigPath}, args...) 42 | newArgs = append(newArgs, []string{"--namespace", runner.Namespace}...) 43 | return runner.RunWithArgs(newArgs...) 
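// For example (illustrative placeholders), Run("get", "cluster", "foo", "-o", "json")
// shells out to: tkg --config <ConfigPath> get cluster foo -o json --namespace <Namespace>
// The runner's shared --config flag is prepended and its --namespace flag is appended
// to the caller-supplied arguments.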
44 | } 45 | 46 | func CreateCluster(r *TKGRunner, name string, vip string) { 47 | Eventually(r.Run("create", "cluster", name, "--plan", "dev", "--controlplane-machine-count", "1", "--vsphere-controlplane-endpoint-ip", vip), "900s").Should(gexec.Exit()) 48 | } 49 | 50 | func GetClusterCredential(r *TKGRunner, name string) { 51 | Eventually(r.Run("get", "credentials", name), "30s").Should(gexec.Exit()) 52 | } 53 | 54 | func DeleteCluster(r *TKGRunner, name string) { 55 | Eventually(r.Run("delete", "cluster", name, "-y"), "30s").Should(gexec.Exit()) 56 | } 57 | 58 | func ClusterExists(r *TKGRunner, name string) bool { 59 | s1 := r.Run("get", "cluster", name, "-o", "json") 60 | Eventually(s1, "10s").Should(gexec.Exit(0)) 61 | s, err := getClusterStatusFromJson(s1.Out.Contents(), name) 62 | Expect(err).ToNot(HaveOccurred()) 63 | return s != "" 64 | } 65 | 66 | func EnsureClusterGone(r *TKGRunner, name string) { 67 | EnsureClusterStatusWithTimeout(r, name, "", "300s") 68 | } 69 | 70 | func EnsureClusterStatus(r *TKGRunner, name, status string) { 71 | EnsureClusterStatusWithTimeout(r, name, status, "30s") 72 | } 73 | 74 | func EnsureClusterStatusWithTimeout(r *TKGRunner, name, status, timeout string) { 75 | Eventually(func() bool { 76 | s1 := r.Run("get", "cluster", name, "-o", "json") 77 | Eventually(s1, "10s").Should(gexec.Exit(0)) 78 | s, err := getClusterStatusFromJson(s1.Out.Contents(), name) 79 | Expect(err).ToNot(HaveOccurred()) 80 | return s == status 81 | }, timeout, "5s").Should(BeTrue()) 82 | } 83 | 84 | func getClusterStatusFromJson(data []byte, name string) (string, error) { 85 | var res string 86 | j, err := simplejson.NewJson(data) 87 | if err != nil { 88 | return res, err 89 | } 90 | clusters, err := j.Array() 91 | if err != nil { 92 | return res, err 93 | } 94 | for _, item := range clusters { 95 | if itemj, ok := item.(map[string]interface{}); ok { 96 | if names, ok := itemj["name"].(string); ok { 97 | if names == name { 98 | if status, ok := itemj["status"].(string); ok { 99 | return status, nil 100 | } 101 | } 102 | } 103 | } 104 | } 105 | return res, nil 106 | } 107 | -------------------------------------------------------------------------------- /e2e/pkg/env/vip.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package env 5 | 6 | import ( 7 | "encoding/json" 8 | "fmt" 9 | "io/ioutil" 10 | "net/http" 11 | ) 12 | 13 | type VIPRunner struct { 14 | WorkerEndpoint string 15 | } 16 | 17 | func NewVIPRunner(workerEndpoint string) *VIPRunner { 18 | return &VIPRunner{ 19 | WorkerEndpoint: workerEndpoint, 20 | } 21 | } 22 | 23 | type nsipResp struct { 24 | IP string `json:"ip"` 25 | Netmask string `json:"netmask"` 26 | Gateway string `json:"gateway"` 27 | } 28 | 29 | func AllocVIP(runner *VIPRunner) (string, error) { 30 | resp, err := http.Get(fmt.Sprintf("http://%s:4827/nsips", runner.WorkerEndpoint)) 31 | if err != nil { 32 | return "", err 33 | } 34 | defer resp.Body.Close() 35 | body, err := ioutil.ReadAll(resp.Body) 36 | if err != nil { 37 | return "", err 38 | } 39 | nresp := &nsipResp{} 40 | err = json.Unmarshal(body, nresp) 41 | if err != nil { 42 | return "", err 43 | } 44 | return nresp.IP, nil 45 | 46 | } 47 | -------------------------------------------------------------------------------- /e2e/static/loadbalancer-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: static-ip 5 | spec: 6 | selector: 7 | app: static-ip 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 80 12 | type: LoadBalancer 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: static-ip 18 | spec: 19 | replicas: 2 20 | selector: 21 | matchLabels: 22 | app: static-ip 23 | template: 24 | metadata: 25 | labels: 26 | app: static-ip 27 | spec: 28 | serviceAccountName: default 29 | containers: 30 | - name: nginx 31 | image: gcr.io/kubernetes-development-244305/nginx:latest 32 | -------------------------------------------------------------------------------- /e2e/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package e2e 5 | 6 | import ( 7 | "log" 8 | "os" 9 | "testing" 10 | 11 | . "github.com/onsi/ginkgo" 12 | . 
"github.com/onsi/gomega" 13 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/e2e/pkg/env" 14 | ) 15 | 16 | func TestHandlers(t *testing.T) { 17 | RegisterFailHandler(Fail) 18 | RunSpecs(t, "AKO Operator e2e Suite") 19 | } 20 | 21 | func init() { 22 | p, exist := os.LookupEnv("E2E_ENV_SPEC") 23 | if !exist { 24 | GinkgoT().Logf("Skip, cannot get e2e test environment spec, is E2E_ENV_SPEC set and pointing to the right path?\n") 25 | return 26 | } 27 | register(p) 28 | } 29 | 30 | func register(p string) { 31 | var _ = BeforeSuite(func() { 32 | GinkgoT().Logf("Starting settting up environment\n") 33 | 34 | if err := env.LoadTestEnv(p); err != nil { 35 | log.Fatalf("Cannot load e2e test environment spec, %s", err.Error()) 36 | } 37 | 38 | GinkgoT().Logf("\n\nStarting tests defined in %s\n\n", p) 39 | 40 | env.ShowThePlan() 41 | }) 42 | } 43 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/utterkeyboar/load-balancer-operator-for-kubernetes 2 | 3 | go 1.23.1 4 | 5 | require ( 6 | github.com/bitly/go-simplejson v0.5.1 7 | github.com/go-logr/logr v1.4.2 8 | github.com/mitchellh/go-homedir v1.1.0 9 | github.com/onsi/ginkgo v1.16.5 10 | github.com/onsi/gomega v1.36.2 11 | github.com/pkg/errors v0.9.1 12 | github.com/satori/go.uuid v1.2.0 13 | github.com/spf13/pflag v1.0.6 14 | github.com/vmware-tanzu/tanzu-framework/apis/run v0.0.0-20221104044415-a462bbe793b9 15 | github.com/vmware/alb-sdk v0.0.0-20240502042605-947bfcf176dd 16 | github.com/vmware/load-balancer-and-ingress-services-for-kubernetes v0.0.0-20231012053946-537d99c1eba2 17 | golang.org/x/mod v0.23.0 18 | gopkg.in/yaml.v3 v3.0.1 19 | k8s.io/api v0.29.3 20 | k8s.io/apiextensions-apiserver v0.29.3 21 | k8s.io/apimachinery v0.29.3 22 | k8s.io/client-go v0.29.3 23 | k8s.io/component-base v0.29.3 24 | k8s.io/klog/v2 v2.110.1 25 | k8s.io/utils v0.0.0-20231127182322-b307cd553661 26 | sigs.k8s.io/cluster-api v1.7.3 27 | sigs.k8s.io/controller-runtime v0.17.3 28 | ) 29 | 30 | require ( 31 | github.com/beorn7/perks v1.0.1 // indirect 32 | github.com/blang/semver/v4 v4.0.0 // indirect 33 | github.com/cespare/xxhash/v2 v2.2.0 // indirect 34 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 35 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 36 | github.com/evanphx/json-patch v5.7.0+incompatible // indirect 37 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect 38 | github.com/fsnotify/fsnotify v1.7.0 // indirect 39 | github.com/go-logr/zapr v1.3.0 // indirect 40 | github.com/go-openapi/jsonpointer v0.19.6 // indirect 41 | github.com/go-openapi/jsonreference v0.20.2 // indirect 42 | github.com/go-openapi/swag v0.22.3 // indirect 43 | github.com/gobuffalo/flect v1.0.2 // indirect 44 | github.com/gogo/protobuf v1.3.2 // indirect 45 | github.com/golang/glog v1.2.4 // indirect 46 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 47 | github.com/golang/protobuf v1.5.4 // indirect 48 | github.com/google/gnostic-models v0.6.8 // indirect 49 | github.com/google/go-cmp v0.6.0 // indirect 50 | github.com/google/gofuzz v1.2.0 // indirect 51 | github.com/google/uuid v1.4.0 // indirect 52 | github.com/imdario/mergo v0.3.13 // indirect 53 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 54 | github.com/josharian/intern v1.0.0 // indirect 55 | github.com/json-iterator/go v1.1.12 // indirect 56 | github.com/mailru/easyjson v0.7.7 // 
indirect 57 | github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect 58 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 59 | github.com/modern-go/reflect2 v1.0.2 // indirect 60 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 61 | github.com/nxadm/tail v1.4.8 // indirect 62 | github.com/prometheus/client_golang v1.18.0 // indirect 63 | github.com/prometheus/client_model v0.5.0 // indirect 64 | github.com/prometheus/common v0.45.0 // indirect 65 | github.com/prometheus/procfs v0.12.0 // indirect 66 | github.com/spf13/cobra v1.8.0 // indirect 67 | go.uber.org/multierr v1.11.0 // indirect 68 | go.uber.org/zap v1.27.0 // indirect 69 | golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect 70 | golang.org/x/net v0.36.0 // indirect 71 | golang.org/x/oauth2 v0.18.0 // indirect 72 | golang.org/x/sys v0.30.0 // indirect 73 | golang.org/x/term v0.29.0 // indirect 74 | golang.org/x/text v0.22.0 // indirect 75 | golang.org/x/time v0.5.0 // indirect 76 | gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect 77 | google.golang.org/appengine v1.6.7 // indirect 78 | google.golang.org/protobuf v1.36.1 // indirect 79 | gopkg.in/inf.v0 v0.9.1 // indirect 80 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 81 | gopkg.in/yaml.v2 v2.4.0 // indirect 82 | k8s.io/cluster-bootstrap v0.29.3 // indirect 83 | k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect 84 | sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect 85 | sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect 86 | sigs.k8s.io/yaml v1.4.0 // indirect 87 | ) 88 | -------------------------------------------------------------------------------- /hack/VMware-copyright: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | -------------------------------------------------------------------------------- /hack/containerd/config.toml: -------------------------------------------------------------------------------- 1 | # explicitly use v2 config format 2 | version = 2 3 | 4 | # set default runtime handler to v2, which has a per-pod shim 5 | [plugins."io.containerd.grpc.v1.cri".containerd] 6 | default_runtime_name = "runc" 7 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] 8 | runtime_type = "io.containerd.runc.v2" 9 | [plugins."io.containerd.grpc.v1.cri".registry] 10 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors] 11 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."10.79.172.11:5000"] 12 | endpoint = ["http://10.79.172.11:5000"] 13 | 14 | # Setup a runtime with the magic name ("test-handler") used for Kubernetes 15 | # runtime class tests ... 16 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.test-handler] 17 | runtime_type = "io.containerd.runc.v2" 18 | -------------------------------------------------------------------------------- /hack/header-check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 VMware, Inc. 
3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | # Simple script that will check files of type .go, .sh, .bash, or Makefile 6 | # for the copyright header. 7 | # 8 | # This will be called by the CI system (with no args) to perform checking and 9 | # fail the job if headers are not correctly set. It can also be called with the 10 | # 'fix' argument to automatically add headers to the missing files. 11 | # 12 | # Check if headers are fine: 13 | # $ ./hack/header-check.sh 14 | # Check and fix headers: 15 | # All changes must be committed for fix to work 16 | # $ ./hack/header-check.sh fix 17 | 18 | set -e -o pipefail 19 | 20 | # These header variables MUST match the first two lines of the 21 | # VMware-copyright file in the scripts directory. 22 | # 23 | # These will be evaluated as a regex against the target file 24 | HEADER[1]="^\/\/ Copyright [0-9]{4}(-[0-9]{4})? VMware, Inc\.$" 25 | HEADER[2]="^\/\/ SPDX-License-Identifier: Apache-2.0$" 26 | 27 | # Initialize vars 28 | ERR=false 29 | FAIL=false 30 | 31 | all-files() { 32 | git ls-files |\ 33 | # Check .go files, Makefile, sh files, bash files, and robot files 34 | grep -e "\.go$" -e "Makefile$" -e "\.sh$" -e "\.bash$" -e "\.robot$" |\ 35 | # Ignore vendor/ 36 | grep -v vendor/ 37 | } 38 | 39 | for file in $(all-files); do 40 | echo -n "Header check: $file... " 41 | 42 | # get the file extension / type 43 | ext=${file##*.} 44 | 45 | # increment line count in certain cases 46 | increment=0 47 | 48 | # should we be incrementing the line count 49 | if [[ $ext == "sh" ]]; then 50 | increment=1 51 | fi 52 | 53 | if [[ "${file#*.}" == "deepcopy.go" ]]; then 54 | increment=2 55 | fi 56 | 57 | for count in $(seq 1 ${#HEADER[@]}); do 58 | if [[ $ext != "go" ]]; then 59 | # if not go code assuming # will suffice 60 | text="${HEADER[$count]/'\/\/'/#}" 61 | else 62 | text=${HEADER[$count]} 63 | fi 64 | 65 | line=$((count + increment )) 66 | # do we have a header match? 67 | if [[ ! 
$(sed "${line}"q\;d "${file}") =~ ${text} ]]; then 68 | ERR=true 69 | fi 70 | done 71 | 72 | if [ $ERR == true ]; then 73 | # is there is a fix argument and are all changes committed 74 | if [[ $# -gt 0 && $1 =~ [[:upper:]fix] ]]; then 75 | # based on file type fix the copyright 76 | case "$ext" in 77 | go) 78 | cat "$(dirname "$0")"/boilerplate.go.txt "${file}" > "${file}".new 79 | ;; 80 | sh) 81 | head -1 "${file}" > "${file}".new 82 | sed 's/\/\//\#/1' < "$(dirname "$0")"/boilerplate.go.txt >> "${file}".new 83 | grep -v '#!/bin/bash' "${file}" >> "${file}".new 84 | ;; 85 | *) 86 | sed 's/\/\//\#/1' < "$(dirname "$0")"/boilerplate.go.txt > "${file}".new 87 | cat "${file}" >> "${file}".new 88 | ;; 89 | esac 90 | 91 | if [ "$(uname -s)" = "Darwin" ]; then 92 | permissions=$(stat -f "%OLp" "${file}") 93 | else 94 | permissions=$(stat --format '%a' "${file}") 95 | fi 96 | mv "${file}".new "${file}" 97 | # make permissions the same 98 | chmod "$permissions" "${file}" 99 | echo "$(tput -T xterm setaf 3)FIXING$(tput -T xterm sgr0)" 100 | ERR=false 101 | else 102 | echo "$(tput -T xterm setaf 1)FAIL$(tput -T xterm sgr0)" 103 | ERR=false 104 | FAIL=true 105 | fi 106 | else 107 | echo "$(tput -T xterm setaf 2)OK$(tput -T xterm sgr0)" 108 | fi 109 | done 110 | 111 | # If we failed one check, return 1 112 | [ $FAIL == true ] && exit 1 || exit 0 113 | -------------------------------------------------------------------------------- /hack/kind/kind-cluster-with-extramounts.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | extraMounts: 6 | - hostPath: /var/run/docker.sock 7 | containerPath: /var/run/docker.sock 8 | -------------------------------------------------------------------------------- /hack/kind/simple-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cluster.x-k8s.io/v1beta1 2 | kind: Cluster 3 | metadata: 4 | name: my-cluster 5 | namespace: default 6 | spec: 7 | clusterNetwork: 8 | services: 9 | cidrBlocks: ["10.128.0.0/12"] 10 | pods: 11 | cidrBlocks: ["192.168.0.0/16"] 12 | serviceDomain: cluster.local 13 | infrastructureRef: 14 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 15 | kind: DockerCluster 16 | name: my-cluster 17 | namespace: default 18 | controlPlaneRef: 19 | kind: KubeadmControlPlane 20 | apiVersion: controlplane.cluster.x-k8s.io/v1beta1 21 | name: controlplane-0 22 | namespace: default 23 | --- 24 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 25 | kind: DockerCluster 26 | metadata: 27 | name: my-cluster 28 | namespace: default 29 | --- 30 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 31 | kind: DockerMachineTemplate 32 | metadata: 33 | name: controlplane-0 34 | namespace: default 35 | spec: 36 | template: 37 | spec: 38 | customImage: harbor-repo.vmware.com/dockerhub-proxy-cache/kindest/node:v1.17.0 39 | extraMounts: 40 | - containerPath: "/var/run/docker.sock" 41 | hostPath: "/var/run/docker.sock" 42 | --- 43 | kind: KubeadmControlPlane 44 | apiVersion: controlplane.cluster.x-k8s.io/v1beta1 45 | metadata: 46 | name: controlplane-0 47 | namespace: default 48 | spec: 49 | replicas: 1 50 | infrastructureTemplate: 51 | kind: DockerMachineTemplate 52 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 53 | name: controlplane-0 54 | namespace: default 55 | kubeadmConfigSpec: 56 | clusterConfiguration: 57 | controllerManager: 58 | extraArgs: 
{enable-hostpath-provisioner: 'true'} 59 | apiServer: 60 | certSANs: [localhost, 127.0.0.1, 0.0.0.0] 61 | initConfiguration: 62 | nodeRegistration: 63 | criSocket: /var/run/containerd/containerd.sock 64 | kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} 65 | joinConfiguration: 66 | nodeRegistration: 67 | criSocket: /var/run/containerd/containerd.sock 68 | kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} 69 | version: "v1.17.2" 70 | --- 71 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 72 | kind: DockerMachineTemplate 73 | metadata: 74 | name: worker-0 75 | namespace: default 76 | spec: 77 | template: 78 | spec: 79 | customImage: harbor-repo.vmware.com/dockerhub-proxy-cache/kindest/node:v1.17.0 80 | --- 81 | apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 82 | kind: KubeadmConfigTemplate 83 | metadata: 84 | name: worker-0 85 | namespace: default 86 | spec: 87 | template: 88 | spec: 89 | joinConfiguration: 90 | nodeRegistration: 91 | kubeletExtraArgs: {eviction-hard: 'nodefs.available<0%,nodefs.inodesFree<0%,imagefs.available<0%'} 92 | --- 93 | apiVersion: cluster.x-k8s.io/v1beta1 94 | kind: MachineDeployment 95 | metadata: 96 | name: worker-0 97 | namespace: default 98 | spec: 99 | clusterName: my-cluster 100 | replicas: 1 101 | selector: 102 | matchLabels: 103 | template: 104 | spec: 105 | clusterName: my-cluster 106 | version: "v1.17.2" 107 | bootstrap: 108 | configRef: 109 | name: worker-0 110 | namespace: default 111 | apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 112 | kind: KubeadmConfigTemplate 113 | infrastructureRef: 114 | name: worker-0 115 | namespace: default 116 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 117 | kind: DockerMachineTemplate 118 | -------------------------------------------------------------------------------- /hack/run-e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 VMware, Inc. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | ################################################################################ 6 | # 7 | # usage: run-e2e 8 | # 9 | # This scripts triggers end to end tests for AKO Operator against a real 10 | # Nimbus-based testbed 11 | # 12 | ################################################################################ 13 | 14 | set -o errexit # Exits immediately on unexpected errors (does not bypass traps) 15 | set -o nounset # Errors if variables are used without first being defined 16 | set -o pipefail # Non-zero exit codes in piped commands causes pipeline to fail 17 | # with that code 18 | 19 | # default FLAKE_ATTEMPT is 3 20 | FLAKE_ATTEMPT=${1:-3} 21 | 22 | # Change directories to the parent directory of the one in which this script is 23 | # located. 24 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 25 | 26 | export PATH=$PATH:$PWD/hack/tools/bin 27 | 28 | E2E_ENV_SPEC=${PWD}/e2e/env.json ginkgo --flakeAttempts="${FLAKE_ATTEMPT}" -v e2e/... 2>&1 | tee e2e.log 29 | -------------------------------------------------------------------------------- /hack/test-e2e.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2021 VMware, Inc. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | # Copyright (c) 2020 VMware, Inc. All Rights Reserved. 
6 | # SPDX-License-Identifier: Apache-2.0 7 | 8 | ################################################################################ 9 | # usage: e2e 10 | # This program deploys a local test environment using AKOO and Kind. 11 | ################################################################################ 12 | 13 | set -o errexit # Exits immediately on unexpected errors (does not bypass traps) 14 | set -o nounset # Errors if variables are used without first being defined 15 | set -o pipefail # Non-zero exit codes in piped commands causes pipeline to fail 16 | # with that code 17 | 18 | # Bring up testing environment 19 | hack/e2e.sh -u 20 | 21 | # Set aliases for accessing both clusters 22 | alias kk='kubectl --kubeconfig=$PWD/tkg-lcp.kubeconfig' 23 | alias kw='kubectl --kubeconfig=$PWD/workload-cls.kubeconfig' 24 | 25 | # Set a bash-specific shell option to expand aliases in shell scripts 26 | shopt -s expand_aliases 27 | 28 | # Set the default kubeconfig to the management cluster 29 | export KUBECONFIG=$PWD/tkg-lcp.kubeconfig 30 | 31 | # Build manager docker image 32 | make docker-build 33 | 34 | # Load manager docker image into kind cluster 35 | kind load docker-image --name tkg-lcp harbor-pks.vmware.com/tkgextensions/tkg-networking/tanzu-ako-operator:dev 36 | 37 | # Deploy the AKO Operator in the management cluster 38 | make deploy || true 39 | 40 | # Make sure AKO Operator is up and running 41 | akooip="" 42 | n=1 43 | while [[ -z "${akooip}" && $n -le 10 ]]; do 44 | sleep 3s 45 | akooip="$(kk get pods -n akoo-system -o wide | grep '^akoo-.*Running' | grep -e '[0-9]*\.[0-9]*\.[0-9]*\.[0-9]' -o)" || true 46 | n=$(( n+1 )) 47 | done 48 | if [ "$n" == "11" ];then 49 | echo "AKO Operator didn't become ready" 50 | exit 1 51 | else 52 | echo "AKO Operator is running at ${akooip}" 53 | fi 54 | 55 | # Enable AVI in the workload cluster 56 | kk label cluster workload-cls cluster-service.network.tkg.tanzu.vmware.com/avi="" 57 | 58 | # Making sure AKO is deployed into the workload cluster 59 | akoip="" 60 | n=1 61 | while [[ -z "${akoip}" && $n -le 10 ]]; do 62 | sleep 5s 63 | akoip="$(kw get pods -n avi-system -o wide | grep '^ako-.*Running' | grep -e '[0-9]*\.[0-9]*\.[0-9]*\.[0-9]' -o)" || true 64 | n=$(( n+1 )) 65 | done 66 | if [ "$n" == "11" ];then 67 | echo "AKO didn't become ready" 68 | exit 1 69 | else 70 | echo "AKO is running at ${akoip}" 71 | fi 72 | 73 | # Making sure the configmap exists 74 | configmap="" 75 | n=1 76 | while [[ -z "${configmap}" && $n -le 10 ]]; do 77 | configmap="$(kw get configmap -n avi-system | grep '^avi-k8s-config' -o)" || true 78 | sleep 3s 79 | n=$(( n+1 )) 80 | done 81 | if [ "$n" == "11" ];then 82 | echo "Configmap doesn't exist" 83 | exit 1 84 | else 85 | echo "${configmap} exists" 86 | fi 87 | 88 | # Making sure AKO Operator adds the finalizer on the cluster 89 | finalizer="" 90 | n=1 91 | while [[ -z "${finalizer}" && $n -le 10 ]]; do 92 | finalizer="$(kk get cluster workload-cls -o yaml | grep 'ako-operator.network.tkg.tanzu.vmware.com' -o | head -1)" || true 93 | sleep 3s 94 | n=$(( n+1 )) 95 | done 96 | if [ "$n" == "11" ];then 97 | echo "Finalizer doesn't exist" 98 | exit 1 99 | else 100 | echo "${finalizer} exists" 101 | fi 102 | 103 | # Making sure the pre-terminate hook is added to the workload cluster Machines 104 | preTerminateHook="" 105 | n=1 106 | while [[ -z "${preTerminateHook}" && $n -le 10 ]]; do 107 | preTerminateHook="$(kk get machine -o yaml | grep terminate | grep 'pre-terminate\.delete\.hook.*ako-operator' -o | head -1)" || true 108 |
sleep 3s 109 | n=$(( n+1 )) 110 | done 111 | if [ "$n" == "11" ];then 112 | echo "Pre-terminate hook doesn't exist" 113 | exit 1 114 | else 115 | echo "${preTerminateHook} exists" 116 | fi 117 | 118 | # Deleting the workload cluster 119 | kk delete cluster workload-cls 120 | 121 | # Making sure the workload cluster is deleted 122 | n=1 123 | while kk get cluster "workload-cls"; do 124 | sleep 5s 125 | n=$(( n+1 )) 126 | if [ $n -ge 11 ];then 127 | echo "Workload cluster delete failed" 128 | exit 1 129 | fi 130 | done 131 | echo "Workload cluster is deleted" 132 | 133 | # Delete testing environment 134 | hack/e2e.sh -d 135 | -------------------------------------------------------------------------------- /hack/test-ytt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 VMware, Inc. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | set -o errexit # Exits immediately on unexpected errors (does not bypass traps) 6 | set -o nounset # Errors if variables are used without first being defined 7 | set -o pipefail # Non-zero exit codes in piped commands causes pipeline to fail 8 | # with that code 9 | 10 | # Change directories to the parent directory of the one in which this script is 11 | # located. 12 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 13 | 14 | export PATH=$PATH:$PWD/hack/tools/bin 15 | 16 | export TEMPLATE_DIR="config/ytt/akodeploymentconfig" 17 | 18 | if command -v tput &>/dev/null && tty -s; then 19 | RED=$(tput setaf 1) 20 | NORMAL=$(tput sgr0) 21 | else 22 | RED=$(echo -en "\e[31m") 23 | NORMAL=$(echo -en "\e[00m") 24 | fi 25 | 26 | log_failure() { 27 | printf "${RED}✖ %s${NORMAL}\n" "$@" >&2 28 | } 29 | 30 | assert_eq() { 31 | local expected="$1" 32 | local actual="$2" 33 | local msg="${3-}" 34 | 35 | if [ "$expected" == "$actual" ]; then 36 | return 0 37 | else 38 | if [ "${#msg}" -gt 0 ]; then 39 | log_failure "$expected == $actual :: $msg" || true 40 | fi 41 | return 1 42 | fi 43 | } 44 | 45 | case1() { 46 | # Test the default AKODeploymentConfig template generation 47 | ytt -f config/ytt/akodeploymentconfig/values.yaml -f config/ytt/akodeploymentconfig/akodeploymentconfig.yaml >/dev/null 2>&1 48 | } 49 | 50 | case2() { 51 | # Test the ip pools section 52 | res="$(ytt -f config/ytt/akodeploymentconfig/values.yaml -f config/ytt/akodeploymentconfig/akodeploymentconfig.yaml -v AVI_DATA_NETWORK_IP_POOL_START=10.0.0.2 -v AVI_DATA_NETWORK_IP_POOL_END=10.0.0.3 -o json 2>&1)" 53 | assert_eq "$(echo "${res}" | jq -cr 'select( .spec).spec.dataNetwork.ipPools[].start')" "10.0.0.2" "failed ipPools" 54 | assert_eq "$(echo "${res}" | jq -cr 'select( .spec).spec.dataNetwork.ipPools[].end')" "10.0.0.3" "failed ipPools" 55 | } 56 | 57 | case1 58 | case2 59 | -------------------------------------------------------------------------------- /hack/tools/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2021 VMware, Inc.
2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | # If you update this file, please follow 5 | # https://suva.sh/posts/well-documented-makefiles 6 | 7 | # Ensure Make is run with bash shell as some syntax below is bash-specific 8 | SHELL := /usr/bin/env bash 9 | 10 | .DEFAULT_GOAL := help 11 | 12 | # Use GOPROXY environment variable if set 13 | GOPROXY := $(shell go env GOPROXY) 14 | ifeq (,$(strip $(GOPROXY))) 15 | GOPROXY := https://proxy.golang.org 16 | endif 17 | export GOPROXY 18 | 19 | # Active module mode, as we use go modules to manage dependencies 20 | export GO111MODULE := on 21 | 22 | # Directories. 23 | BIN_DIR := $(shell pwd)/bin 24 | SRCS := go.mod go.sum 25 | 26 | # Versions. 27 | KUBEBUILDER_VERSION=3.14.2 28 | K8S_VERSION=1.29.3 29 | 30 | # Host information. 31 | HOST_OS=$(shell go env GOOS) 32 | HOST_ARCH=$(shell go env GOARCH) 33 | 34 | # Binaries. 35 | CONTROLLER_GEN := $(BIN_DIR)/controller-gen 36 | CONVERSION_GEN := $(BIN_DIR)/conversion-gen 37 | GOLANGCI_LINT := $(BIN_DIR)/golangci-lint 38 | KUSTOMIZE := $(BIN_DIR)/kustomize 39 | GINKGO := $(BIN_DIR)/ginkgo 40 | KUBE_APISERVER := $(BIN_DIR)/kube-apiserver 41 | KUBEBUILDER := $(BIN_DIR)/kubebuilder 42 | KUBECTL := $(BIN_DIR)/kubectl 43 | ETCD := $(BIN_DIR)/etcd 44 | KIND := $(BIN_DIR)/kind 45 | JQ := $(BIN_DIR)/jq 46 | ENVTEST := $(BIN_DIR)/setup-envtest 47 | 48 | ## -------------------------------------- 49 | ## Help 50 | ## -------------------------------------- 51 | 52 | help: ## Display this help 53 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 54 | 55 | ## -------------------------------------- 56 | ## Binaries 57 | ## -------------------------------------- 58 | 59 | controller-gen: $(CONTROLLER_GEN) $(SRCS) 60 | controller-gen: ## Build controller-gen 61 | $(CONTROLLER_GEN): 62 | go build -tags=tools -o $@ sigs.k8s.io/controller-tools/cmd/controller-gen 63 | 64 | golangci-lint: $(GOLANGCI_LINT) $(SRCS) 65 | golangci-lint: ## Build golangci-lint 66 | $(GOLANGCI_LINT): 67 | curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.59.0 68 | 69 | kustomize: $(KUSTOMIZE) $(SRCS) 70 | kustomize: ## Build kustomize 71 | $(KUSTOMIZE): 72 | go build -tags=tools -o $@ sigs.k8s.io/kustomize/kustomize/v5 73 | 74 | conversion-gen: $(CONVERSION_GEN) $(SRCS) 75 | conversion-gen: ## Build conversion-gen 76 | $(CONVERSION_GEN): 77 | go build -tags=tools -o $@ k8s.io/code-generator/cmd/conversion-gen 78 | 79 | ginkgo: $(GINKGO) $(SRCS) 80 | ginkgo: ## Build ginkgo 81 | $(GINKGO): 82 | go build -tags=tools -o $@ github.com/onsi/ginkgo/ginkgo 83 | 84 | kind: $(KIND) $(SRCS) 85 | kind: ## Build kind 86 | $(KIND): 87 | go build -tags=tools -o $@ sigs.k8s.io/kind 88 | 89 | jq: $(JQ) ## Install jq 90 | $(JQ): 91 | curl -o $(@) -L https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && \ 92 | chmod a+x $(@) 93 | 94 | .PHONY: envtest 95 | envtest: $(ENVTEST) ## Download envtest-setup locally if necessary. 
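# The etcd, kube-apiserver, and kubectl targets below depend on setup-envtest: it
# fetches the Kubernetes test binaries pinned to K8S_VERSION into a temporary
# directory, and they are then copied into BIN_DIR. A roughly equivalent manual
# invocation (illustrative only) would be: bin/setup-envtest use 1.29.3 --bin-dir <tmp-dir>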
96 | $(ENVTEST): 97 | GOBIN=$(BIN_DIR) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@v0.0.0-20240522175850-2e9781e9fc60 98 | 99 | etcd: $(ETCD) ## Install etcd 100 | kube-apiserver: $(KUBE_APISERVER) ## Install kube-apiserver 101 | kubectl: $(KUBECTL) ## Install kubectl 102 | $(ETCD) $(KUBE_APISERVER) $(KUBECTL): envtest 103 | TEMP_DL_DIR=$(shell mktemp -d) && \ 104 | GOBIN=$(BIN_DIR) $(ENVTEST) use $(K8S_VERSION) --bin-dir "$${TEMP_DL_DIR}" && \ 105 | cp "$${TEMP_DL_DIR}"/k8s/*/* $(@D) 106 | 107 | kubebuilder: $(KUBEBUILDER) ## Install kubebuilder 108 | $(KUBEBUILDER): 109 | @mkdir -p $(@D) 110 | TEMP_DL_DIR=$$(mktemp -d) && \ 111 | curl -sL https://github.com/kubernetes-sigs/kubebuilder/releases/download/v$(KUBEBUILDER_VERSION)/kubebuilder_$(KUBEBUILDER_VERSION)_$(HOST_OS)_$(HOST_ARCH) -O $(KUBEBUILDER) 112 | 113 | ## -------------------------------------- 114 | ## Generate 115 | ## -------------------------------------- 116 | 117 | .PHONY: modules 118 | modules: ## Runs go mod to ensure proper vendoring 119 | go mod tidy 120 | 121 | ## -------------------------------------- 122 | ## Cleanup / Verification 123 | ## -------------------------------------- 124 | 125 | .PHONY: clean 126 | clean: ## Run all the clean targets 127 | $(MAKE) clean-bin 128 | 129 | .PHONY: clean-bin 130 | clean-bin: ## Remove all generated binaries 131 | rm -rf bin 132 | -------------------------------------------------------------------------------- /hack/tools/go.mod: -------------------------------------------------------------------------------- 1 | module gitlab.eng.vmware.com/core-build/tkg-connectivity/hack/tools 2 | 3 | go 1.23.1 4 | toolchain go1.23.6 5 | 6 | require ( 7 | carvel.dev/ytt v0.51.1 8 | github.com/onsi/ginkgo v1.16.5 9 | k8s.io/code-generator v0.32.2 10 | sigs.k8s.io/controller-tools v0.17.2 11 | sigs.k8s.io/kind v0.27.0 12 | sigs.k8s.io/kustomize/kustomize/v5 v5.6.0 13 | ) 14 | 15 | require ( 16 | al.essio.dev/pkg/shellescape v1.5.1 // indirect 17 | github.com/BurntSushi/toml v1.4.0 // indirect 18 | github.com/blang/semver/v4 v4.0.0 // indirect 19 | github.com/cppforlife/cobrautil v0.0.0-20200514214827-bb86e6965d72 // indirect 20 | github.com/cppforlife/go-cli-ui v0.0.0-20200505234325-512793797f05 // indirect 21 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 22 | github.com/emicklei/go-restful/v3 v3.11.0 // indirect 23 | github.com/evanphx/json-patch/v5 v5.6.0 // indirect 24 | github.com/fatih/color v1.18.0 // indirect 25 | github.com/fsnotify/fsnotify v1.7.0 // indirect 26 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 27 | github.com/go-errors/errors v1.4.2 // indirect 28 | github.com/go-logr/logr v1.4.2 // indirect 29 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 30 | github.com/go-openapi/jsonreference v0.20.2 // indirect 31 | github.com/go-openapi/swag v0.23.0 // indirect 32 | github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect 33 | github.com/gobuffalo/flect v1.0.3 // indirect 34 | github.com/gogo/protobuf v1.3.2 // indirect 35 | github.com/google/gnostic-models v0.6.9 // indirect 36 | github.com/google/gofuzz v1.2.0 // indirect 37 | github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect 38 | github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect 39 | github.com/hashicorp/go-version v1.6.0 // indirect 40 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 41 | github.com/josharian/intern v1.0.0 // indirect 42 | github.com/json-iterator/go v1.1.12 // indirect 43 
| github.com/k14s/starlark-go v0.0.0-20200720175618-3a5c849cc368 // indirect 44 | github.com/mailru/easyjson v0.7.7 // indirect 45 | github.com/mattn/go-colorable v0.1.13 // indirect 46 | github.com/mattn/go-isatty v0.0.20 // indirect 47 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 48 | github.com/modern-go/reflect2 v1.0.2 // indirect 49 | github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect 50 | github.com/nxadm/tail v1.4.8 // indirect 51 | github.com/pelletier/go-toml v1.9.5 // indirect 52 | github.com/pkg/errors v0.9.1 // indirect 53 | github.com/sergi/go-diff v1.2.0 // indirect 54 | github.com/spf13/cobra v1.8.1 // indirect 55 | github.com/spf13/pflag v1.0.6 // indirect 56 | github.com/x448/float16 v0.8.4 // indirect 57 | github.com/xlab/treeprint v1.2.0 // indirect 58 | golang.org/x/mod v0.22.0 // indirect 59 | golang.org/x/net v0.36.0 // indirect 60 | golang.org/x/sync v0.11.0 // indirect 61 | golang.org/x/sys v0.30.0 // indirect 62 | golang.org/x/text v0.22.0 // indirect 63 | golang.org/x/tools v0.29.0 // indirect 64 | google.golang.org/protobuf v1.35.1 // indirect 65 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 66 | gopkg.in/inf.v0 v0.9.1 // indirect 67 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect 68 | gopkg.in/yaml.v2 v2.4.0 // indirect 69 | gopkg.in/yaml.v3 v3.0.1 // indirect 70 | k8s.io/api v0.32.1 // indirect 71 | k8s.io/apiextensions-apiserver v0.32.1 // indirect 72 | k8s.io/apimachinery v0.32.2 // indirect 73 | k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect 74 | k8s.io/klog/v2 v2.130.1 // indirect 75 | k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect 76 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect 77 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect 78 | sigs.k8s.io/kustomize/api v0.19.0 // indirect 79 | sigs.k8s.io/kustomize/cmd/config v0.19.0 // indirect 80 | sigs.k8s.io/kustomize/kyaml v0.19.0 // indirect 81 | sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect 82 | sigs.k8s.io/yaml v1.4.0 // indirect 83 | ) 84 | 85 | replace go.starlark.net => github.com/k14s/starlark-go v0.0.0-20200522161834-8a7b2030a110 // ytt branch 86 | -------------------------------------------------------------------------------- /hack/tools/tools.go: -------------------------------------------------------------------------------- 1 | // Copyright 2021 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | //go:build tools 4 | // +build tools 5 | 6 | /* 7 | Copyright 2019 The Kubernetes Authors. 8 | 9 | Licensed under the Apache License, Version 2.0 (the "License"); 10 | you may not use this file except in compliance with the License. 11 | You may obtain a copy of the License at 12 | 13 | http://www.apache.org/licenses/LICENSE-2.0 14 | 15 | Unless required by applicable law or agreed to in writing, software 16 | distributed under the License is distributed on an "AS IS" BASIS, 17 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 18 | See the License for the specific language governing permissions and 19 | limitations under the License. 
20 | */ 21 | 22 | // This package imports things required by build scripts, to force `go mod` to see them as dependencies 23 | package tools 24 | 25 | import ( 26 | _ "github.com/onsi/ginkgo/ginkgo" 27 | _ "k8s.io/code-generator" 28 | 29 | _ "carvel.dev/ytt/cmd/ytt" 30 | _ "sigs.k8s.io/controller-tools/cmd/controller-gen" 31 | _ "sigs.k8s.io/kind" 32 | _ "sigs.k8s.io/kustomize/kustomize/v5" 33 | ) 34 | -------------------------------------------------------------------------------- /hack/update-containerd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 VMware, Inc. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | set -o errexit # Exits immediately on unexpected errors (does not bypass traps) 6 | set -o nounset # Errors if variables are used without first being defined 7 | set -o pipefail # Non-zero exit codes in piped commands causes pipeline to fail 8 | # with that code 9 | 10 | # Change directories to the parent directory of the one in which this script is 11 | # located. 12 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 13 | 14 | usage="$( 15 | cat <&2 23 | return "${exit_code}" 24 | } 25 | function fatal() { error "${@}" || exit "${?}"; } 26 | 27 | [[ ! "${#}" -eq "1" ]] && fatal "$( 28 | cat < /etc/containerd/config.toml < 2 { 48 | return InvalidIPFamily, errors.New("too many CIDRs specified") 49 | } 50 | var foundIPv4 bool 51 | var foundIPv6 bool 52 | for _, cidr := range cidrs { 53 | cidrType := GetIPFamilyFromCidr(cidr) 54 | if cidrType == IPv4IpFamily { 55 | foundIPv4 = true 56 | } else if cidrType == IPv6IpFamily { 57 | foundIPv6 = true 58 | } 59 | } 60 | switch { 61 | case foundIPv4 && foundIPv6: 62 | return DualStackIPFamily, nil 63 | case foundIPv4: 64 | return IPv4IpFamily, nil 65 | case foundIPv6: 66 | return IPv6IpFamily, nil 67 | default: 68 | return InvalidIPFamily, errors.New("Invalid IP Family") 69 | } 70 | } 71 | 72 | // GetClusterIPFamily returns a cluster IPFamily from the configuration provided. 73 | // 1. V4: single-stack ipv4 cluster 74 | // 2. V6: single-stack ipv6 cluster 75 | // 3. V4,V6: dual-stack ipv4 primary cluster 76 | // 4. V6,V4: dual-stack ipv6 primary cluster 77 | // 5. 
INVALID: invalid cluster 78 | func GetClusterIPFamily(c *capi.Cluster) (string, error) { 79 | var podCIDRs, serviceCIDRs []string 80 | var podsIPFamily, servicesIPFamily string 81 | var err error 82 | 83 | if c.Spec.ClusterNetwork != nil { 84 | if c.Spec.ClusterNetwork.Pods != nil { 85 | podCIDRs = c.Spec.ClusterNetwork.Pods.CIDRBlocks 86 | } 87 | if c.Spec.ClusterNetwork.Services != nil { 88 | serviceCIDRs = c.Spec.ClusterNetwork.Services.CIDRBlocks 89 | } 90 | } 91 | // Return default ipv4 ipfamily when podcidrs and servicecidrs are both empty 92 | if len(podCIDRs) == 0 && len(serviceCIDRs) == 0 { 93 | return IPv4IpFamily, nil 94 | } 95 | 96 | if len(podCIDRs) != 0 { 97 | podsIPFamily, err = ipFamilyFromCIDRStrings(podCIDRs) 98 | if err != nil { 99 | return InvalidIPFamily, fmt.Errorf("pods: %s", err) 100 | } 101 | } 102 | 103 | if len(serviceCIDRs) != 0 { 104 | servicesIPFamily, err = ipFamilyFromCIDRStrings(serviceCIDRs) 105 | if err != nil { 106 | return InvalidIPFamily, fmt.Errorf("services: %s", err) 107 | } 108 | } 109 | 110 | // Return invalid when podcidrs ipfamily doesn't match servicecidrs ipfamily 111 | if podsIPFamily != servicesIPFamily && len(podCIDRs) != 0 && len(serviceCIDRs) != 0 { 112 | return InvalidIPFamily, errors.New("pods and services IP family mismatch") 113 | } 114 | 115 | if podsIPFamily == DualStackIPFamily || servicesIPFamily == DualStackIPFamily { 116 | if podsIPFamily == DualStackIPFamily { 117 | podCIDRType := GetIPFamilyFromCidr(podCIDRs[0]) 118 | if podCIDRType == IPv4IpFamily { 119 | return DualStackIPv4Primary, nil 120 | } else { 121 | return DualStackIPv6Primary, nil 122 | } 123 | } 124 | serviceCIDRType := GetIPFamilyFromCidr(serviceCIDRs[0]) 125 | if serviceCIDRType == IPv4IpFamily { 126 | return DualStackIPv4Primary, nil 127 | } else { 128 | return DualStackIPv6Primary, nil 129 | } 130 | } 131 | 132 | if len(podCIDRs) == 0 { 133 | return servicesIPFamily, nil 134 | } 135 | return podsIPFamily, nil 136 | } 137 | 138 | // GetPrimaryIPFamily returns a cluster primary IPFamily from the configuration provided. 139 | // 1. V4: single-stack ipv4/dual-stack ipv4 primary cluster 140 | // 2. V6: single-stack ipv6/dual-stack ipv6 primary cluster 141 | // 3. INVALID: invalid cluster 142 | func GetPrimaryIPFamily(c *capi.Cluster) (string, error) { 143 | ipFamily, err := GetClusterIPFamily(c) 144 | if err != nil { 145 | return InvalidIPFamily, fmt.Errorf("Invalid IP Family: %s", err) 146 | } 147 | if ipFamily == IPv4IpFamily || ipFamily == DualStackIPv4Primary { 148 | return IPv4IpFamily, nil 149 | } 150 | return IPv6IpFamily, nil 151 | } 152 | -------------------------------------------------------------------------------- /pkg/utils/get_objects.go: -------------------------------------------------------------------------------- 1 | // Copyright 2024 VMware, Inc. 
2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package utils 5 | 6 | import ( 7 | clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" 8 | ) 9 | 10 | func AVIUserSecretName(cluster *clusterv1.Cluster) string { 11 | return cluster.Name + "-avi-credentials" 12 | } 13 | 14 | func AKOAddonSecretName(cluster *clusterv1.Cluster) string { 15 | return cluster.Name + "-load-balancer-and-ingress-service-addon" 16 | } 17 | 18 | func AKOAddonSecretNameForClusterClass(cluster *clusterv1.Cluster) string { 19 | return cluster.Name + "-load-balancer-and-ingress-service-data-values" 20 | } 21 | -------------------------------------------------------------------------------- /pkg/utils/password_generator.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package utils 5 | 6 | import ( 7 | "math/rand" 8 | "time" 9 | ) 10 | 11 | const ( 12 | numerics = "0123456789" 13 | specials = "~=+%^*/()[]{}/!@#$?|" 14 | uppercases = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" 15 | lowercases = "abcdefghijklmnopqrstuvwxyz" 16 | ) 17 | 18 | // GenereatePassword generates a random password 19 | func GenereatePassword(length int, mustHaveLowercase, mustHaveUppercase, mustHaveSpecial, mustHaveNumeric bool) string { 20 | rand.Seed(time.Now().UnixNano()) 21 | all := uppercases + lowercases + numerics + specials 22 | buf := make([]byte, length) 23 | i := 0 24 | 25 | if mustHaveLowercase { 26 | buf[i] = lowercases[rand.Intn(len(lowercases))] 27 | i++ 28 | } 29 | 30 | if mustHaveUppercase { 31 | buf[i] = uppercases[rand.Intn(len(uppercases))] 32 | i++ 33 | } 34 | 35 | if mustHaveSpecial { 36 | buf[i] = specials[rand.Intn(len(specials))] 37 | i++ 38 | } 39 | 40 | if mustHaveNumeric { 41 | buf[i] = numerics[rand.Intn(len(numerics))] 42 | i++ 43 | } 44 | 45 | for ; i < length; i++ { 46 | buf[i] = all[rand.Intn(len(all))] 47 | } 48 | 49 | rand.Shuffle(len(buf), func(i, j int) { 50 | buf[i], buf[j] = buf[j], buf[i] 51 | }) 52 | 53 | return string(buf) 54 | } 55 | -------------------------------------------------------------------------------- /pkg/utils/password_generator_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package utils_test 5 | 6 | import ( 7 | "strings" 8 | 9 | "github.com/onsi/ginkgo" 10 | .
"github.com/onsi/gomega" 11 | "github.com/utterkeyboar/load-balancer-operator-for-kubernetes/pkg/utils" 12 | ) 13 | 14 | var _ = ginkgo.Describe("Test password generate", func() { 15 | ginkgo.It("should contain lowercase", func() { 16 | pwd := utils.GenereatePassword(5, true, false, false, false) 17 | Expect(strings.ContainsAny(pwd, "abcdefghijklmnopqrstuvwxyz")) 18 | Expect(len(pwd)).To(Equal(5)) 19 | }) 20 | ginkgo.It("should contain uppercase", func() { 21 | pwd := utils.GenereatePassword(5, false, true, false, false) 22 | Expect(strings.ContainsAny(pwd, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")) 23 | Expect(len(pwd)).To(Equal(5)) 24 | }) 25 | ginkgo.It("should contain specials", func() { 26 | pwd := utils.GenereatePassword(5, false, false, true, false) 27 | Expect(strings.ContainsAny(pwd, "~=+%^*/()[]{}/!@#$?|")) 28 | Expect(len(pwd)).To(Equal(5)) 29 | }) 30 | ginkgo.It("should contain digits", func() { 31 | pwd := utils.GenereatePassword(5, false, false, false, true) 32 | Expect(strings.ContainsAny(pwd, "0123456789")) 33 | Expect(len(pwd)).To(Equal(5)) 34 | }) 35 | }) 36 | -------------------------------------------------------------------------------- /pkg/utils/utils_suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020 VMware, Inc. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | package utils_test 5 | 6 | import ( 7 | "testing" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . "github.com/onsi/gomega" 11 | ) 12 | 13 | func TestUtils(t *testing.T) { 14 | RegisterFailHandler(Fail) 15 | RunSpecs(t, "Utils Suite") 16 | } 17 | --------------------------------------------------------------------------------