├── .github ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yml └── workflows │ ├── apidiff.yaml │ ├── build.yaml │ ├── delete-old-versions.yaml │ ├── e2e-branch.yaml │ ├── e2e-workflow.yaml │ ├── lint.yaml │ ├── nightly-publish.yaml │ ├── release.yaml │ ├── scan.yaml │ ├── unit.yaml │ └── verify.yaml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yaml ├── CODEOWNERS ├── Dockerfile.dapper ├── License ├── Makefile ├── README.md ├── charts ├── aks-operator-crd │ ├── Chart.yaml │ └── templates │ │ └── crds.yaml └── aks-operator │ ├── Chart.yaml │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── clusterrole.yaml │ ├── clusterrolebinding.yaml │ ├── deployment.yaml │ └── serviceaccount.yaml │ └── values.yaml ├── controller ├── aks-cluster-config-handler.go ├── aks-cluster-config-handler_test.go ├── external.go └── suite_test.go ├── examples ├── create-example-udr.yaml ├── create-example.yaml └── import-example.yaml ├── go.mod ├── go.sum ├── main.go ├── package ├── Dockerfile └── entrypoint.sh ├── pkg ├── aks │ ├── check.go │ ├── check_test.go │ ├── client.go │ ├── convert.go │ ├── create.go │ ├── create_test.go │ ├── delete.go │ ├── delete_test.go │ ├── exists.go │ ├── get.go │ ├── services │ │ ├── agentpools.go │ │ ├── groups.go │ │ ├── managedclusters.go │ │ ├── mock_services │ │ │ ├── agentpools_mock.go │ │ │ ├── doc.go │ │ │ ├── groups_mock.go │ │ │ ├── managedclusters_mock.go │ │ │ └── workplaces_mock.go │ │ └── workplaces.go │ ├── suite_test.go │ ├── update.go │ └── update_test.go ├── apis │ └── aks.cattle.io │ │ ├── v1 │ │ ├── doc.go │ │ ├── types.go │ │ ├── zz_generated_deepcopy.go │ │ ├── zz_generated_list_types.go │ │ └── zz_generated_register.go │ │ └── zz_generated_register.go ├── codegen │ ├── boilerplate.go.txt │ ├── cleanup │ │ └── main.go │ └── main.go ├── generated │ └── controllers │ │ ├── aks.cattle.io │ │ ├── factory.go │ │ ├── interface.go │ │ └── v1 │ │ │ ├── aksclusterconfig.go │ │ │ └── interface.go │ │ └── core │ │ ├── factory.go │ │ ├── 
interface.go │ │ └── v1 │ │ ├── interface.go │ │ ├── node.go │ │ ├── pod.go │ │ └── secret.go ├── test │ ├── cleanup.go │ └── envtest.go ├── utils │ ├── azure.go │ ├── convert.go │ ├── map.go │ └── parse.go └── version │ └── version.go ├── scripts ├── build ├── ci ├── entry ├── go_install.sh ├── package ├── package-helm ├── setup-kind-cluster.sh ├── validate └── version └── test └── e2e ├── Dockerfile.e2e ├── basic_cluster_test.go ├── config ├── config.go └── config.yaml ├── deploy_operator_test.go ├── suite_test.go └── templates └── basic-cluster.yaml /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 10 | 11 | **What this PR does / why we need it**: 12 | 13 | 14 | 15 | **Which issue(s) this PR fixes** 16 | Issue # 17 | 18 | **Special notes for your reviewer**: 19 | 20 | **Checklist**: 21 | 22 | 23 | - [ ] squashed commits into logical changes 24 | - [ ] includes documentation 25 | - [ ] adds unit tests 26 | - [ ] adds or updates e2e tests 27 | - [ ] backport needed 28 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # Please see the documentation for all configuration options: 2 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 3 | version: 2 4 | updates: 5 | # GitHub Actions 6 | - package-ecosystem: "github-actions" 7 | directory: "/" 8 | schedule: 9 | interval: "weekly" 10 | commit-message: 11 | prefix: ":seedling:" 12 | # Go modules in main branch 13 | - package-ecosystem: "gomod" 14 | directory: "/" 15 | schedule: 16 | interval: "weekly" 17 | ignore: 18 | # Ignore controller-runtime as it's upgraded manually. 19 | - dependency-name: "sigs.k8s.io/controller-runtime" 20 | # Ignore k8s and its transitives modules as they are upgraded manually 21 | # together with controller-runtime. 
22 | - dependency-name: "k8s.io/*" 23 | # Ignore wrangler 24 | - dependency-name: "github.com/rancher/wrangler" 25 | - dependency-name: "github.com/rancher/wrangler/v3" 26 | - dependency-name: "go.etcd.io/*" 27 | - dependency-name: "github.com/Azure/azure-sdk-for-go" 28 | commit-message: 29 | prefix: ":seedling:" 30 | target-branch: "main" 31 | # Go modules in release-v2.11 branch 32 | - package-ecosystem: "gomod" 33 | directory: "/" 34 | schedule: 35 | interval: "weekly" 36 | ignore: 37 | # Ignore controller-runtime as it's upgraded manually. 38 | - dependency-name: "sigs.k8s.io/controller-runtime" 39 | # Ignore k8s and its transitives modules as they are upgraded manually 40 | # together with controller-runtime. 41 | - dependency-name: "k8s.io/*" 42 | # Ignore wrangler 43 | - dependency-name: "github.com/rancher/wrangler" 44 | - dependency-name: "github.com/rancher/wrangler/v3" 45 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 46 | - dependency-name: "go.etcd.io/*" 47 | - dependency-name: "github.com/Azure/azure-sdk-for-go" 48 | commit-message: 49 | prefix: ":seedling:" 50 | target-branch: "release-v2.11" 51 | # Go modules in release-v2.10 branch 52 | - package-ecosystem: "gomod" 53 | directory: "/" 54 | schedule: 55 | interval: "weekly" 56 | ignore: 57 | # Ignore controller-runtime as it's upgraded manually. 58 | - dependency-name: "sigs.k8s.io/controller-runtime" 59 | # Ignore k8s and its transitives modules as they are upgraded manually 60 | # together with controller-runtime. 
61 | - dependency-name: "k8s.io/*" 62 | # Ignore wrangler 63 | - dependency-name: "github.com/rancher/wrangler" 64 | - dependency-name: "github.com/rancher/wrangler/v3" 65 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 66 | - dependency-name: "go.etcd.io/*" 67 | - dependency-name: "github.com/Azure/azure-sdk-for-go" 68 | - dependency-name: "github.com/rancher/lasso" 69 | commit-message: 70 | prefix: ":seedling:" 71 | target-branch: "release-v2.10" 72 | # Go modules in release-v2.9 branch 73 | - package-ecosystem: "gomod" 74 | directory: "/" 75 | schedule: 76 | interval: "weekly" 77 | ignore: 78 | # Ignore controller-runtime as it's upgraded manually. 79 | - dependency-name: "sigs.k8s.io/controller-runtime" 80 | # Ignore k8s and its transitives modules as they are upgraded manually 81 | # together with controller-runtime. 82 | - dependency-name: "k8s.io/*" 83 | # Ignore wrangler 84 | - dependency-name: "github.com/rancher/wrangler" 85 | - dependency-name: "github.com/rancher/wrangler/v3" 86 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 87 | - dependency-name: "go.etcd.io/*" 88 | - dependency-name: "github.com/Azure/azure-sdk-for-go" 89 | - dependency-name: "github.com/rancher/lasso" 90 | commit-message: 91 | prefix: ":seedling:" 92 | target-branch: "release-v2.9" 93 | # Go modules in release-v2.8 branch 94 | - package-ecosystem: "gomod" 95 | directory: "/" 96 | schedule: 97 | interval: "weekly" 98 | ignore: 99 | # Ignore controller-runtime as it's upgraded manually. 100 | - dependency-name: "sigs.k8s.io/controller-runtime" 101 | # Ignore k8s and its transitives modules as they are upgraded manually 102 | # together with controller-runtime. 
103 | - dependency-name: "k8s.io/*" 104 | # Ignore wrangler 105 | - dependency-name: "github.com/rancher/wrangler" 106 | - dependency-name: "github.com/rancher/wrangler/v3" 107 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 108 | - dependency-name: "go.etcd.io/*" 109 | - dependency-name: "github.com/Azure/azure-sdk-for-go" 110 | - dependency-name: "github.com/rancher/lasso" 111 | commit-message: 112 | prefix: ":seedling:" 113 | target-branch: "release-v2.8" 114 | -------------------------------------------------------------------------------- /.github/workflows/apidiff.yaml: -------------------------------------------------------------------------------- 1 | name: Go API Diff 2 | on: 3 | pull_request: 4 | jobs: 5 | go-apidiff: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v4 9 | with: 10 | fetch-depth: 0 11 | - uses: actions/setup-go@v5 12 | with: 13 | go-version: 1.23.x 14 | - name: Generate API diff 15 | run: make apidiff 16 | 17 | -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: Build 2 | on: 3 | pull_request: 4 | push: 5 | branches: 6 | - main 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout code 12 | uses: actions/checkout@v4 13 | - name: Build 14 | run: make image-build 15 | -------------------------------------------------------------------------------- /.github/workflows/delete-old-versions.yaml: -------------------------------------------------------------------------------- 1 | name: Delete Old images and charts 2 | on: 3 | schedule: 4 | - cron: '0 1 * * 1,4' # Every Monday and Thursday at 01:00 UTC 5 | workflow_dispatch: 6 | 7 | jobs: 8 | delete_old_packages: 9 | runs-on: ubuntu-latest 10 | permissions: 11 | packages: write 12 | steps: 13 | - name: Delete old aks-operator images 14 | uses: actions/delete-package-versions@v4 15 | with:
16 | package-name: aks-operator 17 | package-type: container 18 | min-versions-to-keep: 30 19 | token: ${{ secrets.GITHUB_TOKEN }} 20 | owner: rancher 21 | 22 | - name: Delete old rancher-aks-operator charts 23 | uses: actions/delete-package-versions@v4 24 | with: 25 | package-name: rancher-aks-operator-chart/rancher-aks-operator 26 | package-type: container 27 | min-versions-to-keep: 7 28 | token: ${{ secrets.GITHUB_TOKEN }} 29 | owner: rancher 30 | 31 | - name: Delete old rancher-aks-operator-crd charts 32 | uses: actions/delete-package-versions@v4 33 | with: 34 | package-name: rancher-aks-operator-crd-chart/rancher-aks-operator-crd 35 | package-type: container 36 | min-versions-to-keep: 7 37 | token: ${{ secrets.GITHUB_TOKEN }} 38 | owner: rancher 39 | -------------------------------------------------------------------------------- /.github/workflows/e2e-branch.yaml: -------------------------------------------------------------------------------- 1 | name: E2E test branch 2 | on: 3 | workflow_call: 4 | secrets: 5 | AZURE_CLIENT_ID: 6 | description: "Azure client ID" 7 | required: true 8 | AZURE_CLIENT_SECRET: 9 | description: "Azure client secret" 10 | required: true 11 | AZURE_SUBSCRIPTION_ID: 12 | description: "Azure subscription ID" 13 | required: true 14 | AZURE_RESOURCE_GROUP: 15 | description: "Azure resource group" 16 | required: true 17 | SLACK_WEBHOOK_URL: 18 | description: "WebHook URL to use for Slack" 19 | required: true 20 | inputs: 21 | branch: 22 | type: string 23 | default: "release-v2.9" 24 | 25 | jobs: 26 | e2e-tests: 27 | env: 28 | BRANCH: ${{ inputs.branch }} 29 | runs-on: ubuntu-latest 30 | steps: 31 | - name: Checkout code 32 | uses: actions/checkout@v4 33 | with: 34 | ref: | 35 | ${{ env.BRANCH }} 36 | - name: Login to GHCR registry 37 | uses: docker/login-action@v3 38 | with: 39 | registry: ghcr.io 40 | username: ${{ github.actor }} 41 | password: ${{ secrets.GITHUB_TOKEN }} 42 | - name: Setup Docker Buildx 43 | uses: 
docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 44 | - name: Build and push image 45 | env: 46 | REPO: ghcr.io/rancher 47 | run: | 48 | make image-push 49 | - name: Install Go 50 | uses: actions/setup-go@v5 51 | with: 52 | go-version: 1.23.x 53 | - uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 54 | with: 55 | version: v0.23.0 56 | install_only: true 57 | - name: Create kind cluster 58 | run: make setup-kind 59 | - name: E2E tests 60 | env: 61 | AZURE_CLIENT_ID: "${{ secrets.AZURE_CLIENT_ID }}" 62 | AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} 63 | AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} 64 | AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }} 65 | REPO: ghcr.io/rancher 66 | run: make e2e-tests 67 | - name: Archive artifacts 68 | if: always() 69 | uses: actions/upload-artifact@v4.6.2 70 | with: 71 | name: ci-artifacts-${{ env.BRANCH }} 72 | path: _artifacts 73 | if-no-files-found: ignore 74 | - name: Send failed status to slack 75 | if: failure() && github.event_name == 'schedule' 76 | uses: slackapi/slack-github-action@v2.0.0 77 | with: 78 | payload: | 79 | { 80 | "blocks": [ 81 | { 82 | "type": "section", 83 | "text": { 84 | "type": "mrkdwn", 85 | "text": "AKS Operator E2E test run failed." 
86 | }, 87 | "accessory": { 88 | "type": "button", 89 | "text": { 90 | "type": "plain_text", 91 | "text": ":github:", 92 | "emoji": true 93 | }, 94 | "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" 95 | } 96 | } 97 | ] 98 | } 99 | env: 100 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 101 | SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK 102 | -------------------------------------------------------------------------------- /.github/workflows/e2e-workflow.yaml: -------------------------------------------------------------------------------- 1 | name: Run E2E test workflow 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | - cron: 0 22 * * * 6 | permissions: 7 | contents: read 8 | packages: write # Required for pushing images to ghcr.io 9 | jobs: 10 | e2e-test-main: 11 | uses: ./.github/workflows/e2e-branch.yaml 12 | with: 13 | branch: main 14 | secrets: 15 | AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} 16 | AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} 17 | AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} 18 | AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }} 19 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 20 | e2e-test-v2_11: 21 | if: ${{ always() }} 22 | needs: e2e-test-main 23 | uses: ./.github/workflows/e2e-branch.yaml 24 | with: 25 | branch: release-v2.11 26 | secrets: 27 | AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} 28 | AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} 29 | AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} 30 | AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }} 31 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 32 | e2e-test-v2_10: 33 | if: ${{ always() }} 34 | needs: e2e-test-v2_11 35 | uses: ./.github/workflows/e2e-branch.yaml 36 | with: 37 | branch: release-v2.10 38 | secrets: 39 | AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} 40 | AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} 41 | AZURE_SUBSCRIPTION_ID: ${{ 
secrets.AZURE_SUBSCRIPTION_ID }} 42 | AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }} 43 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 44 | e2e-test-v2_9: 45 | if: ${{ always() }} 46 | needs: e2e-test-v2_10 47 | uses: ./.github/workflows/e2e-branch.yaml 48 | with: 49 | branch: release-v2.9 50 | secrets: 51 | AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} 52 | AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} 53 | AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} 54 | AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }} 55 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 56 | e2e-test-v2_8: 57 | if: ${{ always() }} 58 | needs: e2e-test-v2_9 59 | uses: ./.github/workflows/e2e-branch.yaml 60 | with: 61 | branch: release-v2.8 62 | secrets: 63 | AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} 64 | AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} 65 | AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} 66 | AZURE_RESOURCE_GROUP: ${{ secrets.AZURE_RESOURCE_GROUP }} 67 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: 3 | pull_request: 4 | push: 5 | branches: [ "main", "release-v*" ] 6 | tags: 7 | - 'v*' 8 | jobs: 9 | lint: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout code 13 | uses: actions/checkout@v4 14 | - name: Install Go 15 | uses: actions/setup-go@v5 16 | with: 17 | go-version: 1.23.x 18 | - name: Analysis 19 | uses: golangci/golangci-lint-action@v6 20 | with: 21 | args: -v 22 | -------------------------------------------------------------------------------- /.github/workflows/nightly-publish.yaml: -------------------------------------------------------------------------------- 1 | name: Nightly 2 | on: 3 | schedule: 4 | - cron: '0 1 * * *' 5 | workflow_dispatch: 6 | jobs: 7 
| publish_nightly: 8 | uses: rancher-sandbox/highlander-reusable-workflows/.github/workflows/operator-with-latest-rancher-build.yaml@main 9 | with: 10 | operator_name: aks-operator 11 | operator_commit: ${{ github.sha }} 12 | 13 | publish_images: 14 | permissions: 15 | packages: write # Required for pushing images to ghcr.io 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v4 20 | - name: Login to GHCR registry 21 | uses: docker/login-action@v3 22 | with: 23 | registry: ghcr.io 24 | username: ${{ github.actor }} 25 | password: ${{ secrets.GITHUB_TOKEN }} 26 | - name: Setup Docker Buildx 27 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 28 | - name: Set image tag 29 | run: echo "TAG=v0.0.0-$(date +'%Y%m%d')" >> "$GITHUB_ENV" 30 | - name: Build and push image 31 | env: 32 | REPO: ghcr.io/rancher 33 | run: | 34 | make image-push 35 | 36 | publish_charts: 37 | permissions: 38 | packages: write # Required for pushing charts to ghcr.io 39 | runs-on: ubuntu-latest 40 | needs: publish_images 41 | steps: 42 | - name: Checkout code 43 | uses: actions/checkout@v4 44 | - name: Login to GHCR registry 45 | uses: docker/login-action@v3 46 | with: 47 | registry: ghcr.io 48 | username: ${{ github.actor }} 49 | password: ${{ secrets.GITHUB_TOKEN }} 50 | - name: Install Helm 51 | uses: azure/setup-helm@v4 52 | with: 53 | version: 3.8.0 54 | - name: Set image tag and chart version 55 | run: | 56 | echo "TAG=v0.0.0-$(date +'%Y%m%d')" >> "$GITHUB_ENV" 57 | echo "CHART_VERSION=$(date +'%Y%m%d')" >> "$GITHUB_ENV" 58 | - name: Build charts 59 | env: 60 | REPO: ghcr.io/rancher # used in the Helm chart values.yaml 61 | run: | 62 | make charts 63 | - name: Push charts 64 | run: | 65 | helm push bin/rancher-aks-operator-$CHART_VERSION.tgz oci://ghcr.io/${{ github.repository_owner }}/rancher-aks-operator-chart 66 | helm push bin/rancher-aks-operator-crd-$CHART_VERSION.tgz oci://ghcr.io/${{ 
github.repository_owner }}/rancher-aks-operator-crd-chart -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | # GitHub settings / example values: 9 | # 10 | # org level vars: 11 | # - PUBLIC_REGISTRY: docker.io 12 | # repo level vars: 13 | # - PUBLIC_REGISTRY_REPO: rancher 14 | # repo level secrets: 15 | # - PUBLIC_REGISTRY_USERNAME 16 | # - PUBLIC_REGISTRY_PASSWORD 17 | 18 | jobs: 19 | publish-images: 20 | permissions: 21 | contents: read 22 | id-token: write # required for reading vault secrets and for cosign's use in ecm-distro-tools/publish-image 23 | strategy: 24 | matrix: 25 | include: 26 | # Three images are created: 27 | # - Multi-arch manifest for both amd64 and arm64 28 | - tag-suffix: "" 29 | platforms: linux/amd64,linux/arm64 30 | # - arm64 manifest 31 | - tag-suffix: "-arm64" 32 | platforms: linux/arm64 33 | # - amd64 manifest 34 | - tag-suffix: "-amd64" 35 | platforms: linux/amd64 36 | runs-on: ubuntu-latest 37 | steps: 38 | - name: Checkout code 39 | uses: actions/checkout@v4 40 | with: 41 | fetch-depth: 0 42 | ref: ${{ github.ref_name}} 43 | - name: Read secrets 44 | uses: rancher-eio/read-vault-secrets@main 45 | with: 46 | secrets: | 47 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | PUBLIC_REGISTRY_USERNAME ; 48 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | PUBLIC_REGISTRY_PASSWORD ; 49 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials registry | PRIME_REGISTRY ; 50 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials username | PRIME_REGISTRY_USERNAME ; 51 | secret/data/github/repo/${{ github.repository 
}}/rancher-prime-registry/credentials password | PRIME_REGISTRY_PASSWORD ; 52 | - name: Publish images 53 | uses: rancher/ecm-distro-tools/actions/publish-image@master 54 | with: 55 | image: aks-operator 56 | tag: ${{ github.ref_name }}${{ matrix.tag-suffix }} 57 | platforms: ${{ matrix.platforms }} 58 | public-registry: docker.io 59 | public-repo: rancher 60 | public-username: ${{ env.PUBLIC_REGISTRY_USERNAME }} 61 | public-password: ${{ env.PUBLIC_REGISTRY_PASSWORD }} 62 | prime-registry: ${{ env.PRIME_REGISTRY }} 63 | prime-repo: rancher 64 | prime-username: ${{ env.PRIME_REGISTRY_USERNAME }} 65 | prime-password: ${{ env.PRIME_REGISTRY_PASSWORD }} 66 | make-target: image-push 67 | - name: Cleanup checksum files # in order to avoid goreleaser dirty state error, remove once rancher/ecm-distro-tools/actions/publish-image@main gets updated 68 | run: rm -f slsactl_*_checksums.txt* 69 | 70 | release: 71 | permissions: 72 | contents: write # required for creating GH release 73 | runs-on: ubuntu-latest 74 | needs: publish-images 75 | steps: 76 | - name: Checkout code 77 | uses: actions/checkout@v4 78 | with: 79 | fetch-depth: 0 80 | ref: ${{ github.ref_name}} 81 | - name: Create release 82 | env: 83 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required for creating GH release 84 | GORELEASER_CURRENT_TAG: ${{ github.ref_name }} # specify the tag to be released 85 | id: goreleaser 86 | uses: goreleaser/goreleaser-action@v6 87 | with: 88 | distribution: goreleaser 89 | version: "~> v2" 90 | args: release --clean --verbose 91 | - name: Upload charts to release 92 | env: 93 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required for updating GH release 94 | REPO: rancher # First name component for Docker repository to reference in `values.yaml` of the Helm chart release, this is expected to be `rancher`, image name is appended to this value 95 | TAG: ${{ github.ref_name }} # image tag to be referenced in `values.yaml` of the Helm chart release 96 | run: | 97 | version=$(echo 
'${{ steps.goreleaser.outputs.metadata }}' | jq -r '.version') 98 | echo "Publishing helm charts (version: $version)" 99 | 100 | # Both version and appVersion are set to the same value in the Chart.yaml (excluding the 'v' prefix) 101 | CHART_VERSION=$version GIT_TAG=$version make charts 102 | 103 | for f in $(find bin/ -name '*.tgz'); do 104 | echo "Uploading $f to GitHub release $TAG" 105 | gh release upload $TAG $f 106 | done 107 | echo "Charts successfully uploaded to GitHub release $TAG" 108 | 109 | dispatch-dependency: 110 | permissions: 111 | contents: read 112 | id-token: write 113 | actions: write 114 | runs-on: ubuntu-latest 115 | timeout-minutes: 10 116 | needs: publish-images 117 | if: github.event_name == 'push' && github.ref_type == 'tag' 118 | steps: 119 | - name: Read App Secrets 120 | uses: rancher-eio/read-vault-secrets@main 121 | with: 122 | secrets: | 123 | secret/data/github/repo/${{ github.repository }}/github/workflow-dispatcher/app-credentials appId | APP_ID ; 124 | secret/data/github/repo/${{ github.repository }}/github/workflow-dispatcher/app-credentials privateKey | PRIVATE_KEY 125 | 126 | - name: Create App Token 127 | uses: actions/create-github-app-token@v1 128 | id: app-token 129 | with: 130 | app-id: ${{ env.APP_ID }} 131 | private-key: ${{ env.PRIVATE_KEY }} 132 | owner: ${{ github.repository_owner }} 133 | 134 | - name: Run dispatch 135 | env: 136 | GH_TOKEN: ${{ steps.app-token.outputs.token }} 137 | run: | 138 | case ${{ github.ref_name }} in 139 | "v1.12"*) 140 | ACTION_TARGET_BRANCH="main" 141 | ;; 142 | "v1.11"*) 143 | ACTION_TARGET_BRANCH="release/v2.11" 144 | ;; 145 | "v1.10"*) 146 | ACTION_TARGET_BRANCH="release/v2.10" 147 | ;; 148 | "v1.9"*) 149 | ACTION_TARGET_BRANCH="release/v2.9" 150 | ;; 151 | "v1.3"*) 152 | ACTION_TARGET_BRANCH="release/v2.8" 153 | ;; 154 | *) 155 | echo "Not a valid tag, not dispatching event" 156 | exit 0 157 | esac 158 | echo "Running Go get on $ACTION_TARGET_BRANCH" 159 | gh workflow run "Go get" 
--repo rancher/rancher --ref $ACTION_TARGET_BRANCH -F goget_module=github.com/rancher/aks-operator -F goget_version=${{ github.ref_name }} -F source_author=${{ github.actor }} 160 | -------------------------------------------------------------------------------- /.github/workflows/scan.yaml: -------------------------------------------------------------------------------- 1 | name: Scan 2 | on: 3 | workflow_dispatch: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | tags: 11 | - "v*" 12 | jobs: 13 | scan: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | - name: Set up Docker Buildx 21 | id: buildx 22 | uses: docker/setup-buildx-action@v3.10.0 23 | - name: Build image 24 | uses: docker/build-push-action@v6.16.0 25 | with: 26 | context: . 27 | tags: ghcr.io/rancher/aks-operator:${{ github.sha }} 28 | load: true 29 | push: false 30 | file: package/Dockerfile 31 | build-args: | 32 | TAG=${{ github.sha }} 33 | REPO=ghcr.io/rancher/aks-operator 34 | COMMIT=${{ github.sha }} 35 | - name: Run Trivy vulnerability scanner 36 | uses: aquasecurity/trivy-action@0.30.0 37 | env: 38 | TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db 39 | TRIVY_JAVA_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-java-db,public.ecr.aws/aquasecurity/trivy-java-db 40 | with: 41 | image-ref: "ghcr.io/rancher/aks-operator:${{ github.sha }}" 42 | format: "table" 43 | exit-code: "1" 44 | ignore-unfixed: true 45 | severity: "CRITICAL,HIGH" 46 | -------------------------------------------------------------------------------- /.github/workflows/unit.yaml: -------------------------------------------------------------------------------- 1 | name: Unit tests 2 | on: 3 | pull_request: 4 | push: 5 | branches: [ "main", "release-v*" ] 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - name: Install Go 12 | uses: 
actions/setup-go@v5 13 | with: 14 | go-version: 1.23.x 15 | - name: Run tests 16 | run: | 17 | make test 18 | -------------------------------------------------------------------------------- /.github/workflows/verify.yaml: -------------------------------------------------------------------------------- 1 | name: Verify 2 | on: 3 | pull_request: 4 | push: 5 | branches: [ "main", "release-v*" ] 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - name: Install Go 12 | uses: actions/setup-go@v5 13 | with: 14 | go-version: 1.23.x 15 | - name: Run make verify 16 | run: | 17 | make verify 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .vscode/ 3 | bin/ 4 | dist/ 5 | vendor/ 6 | .dapper 7 | .DS_Store 8 | _artifacts/ 9 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | timeout: 5m 3 | go: "1.23" 4 | tests: false 5 | allow-parallel-runners: true 6 | 7 | output: 8 | formats: 9 | - format: github-actions 10 | 11 | linters: 12 | disable-all: true 13 | enable: 14 | - dupl # check duplicated code 15 | - goconst # check strings that can turn into constants 16 | - gofmt # check fmt 17 | - goimports # check imports 18 | - gosec # check for security problems 19 | - govet # check vet 20 | - importas # check consistent import aliasing 21 | - ineffassign # check ineffectual assignments 22 | - misspell # check for misspelled English words 23 | - nakedret # check naked returns in functions 24 | - prealloc # check preallocated slice declarations 25 | - revive # replacement for golint 26 | - unconvert # check redundant type conversions 27 | - whitespace # check for trailing whitespace and tabs 28 | linters-settings: 29 | revive: 30 | rules: 31 | # The following 
rules are recommended https://github.com/mgechev/revive#recommended-configuration 32 | - name: blank-imports 33 | - name: context-as-argument 34 | - name: context-keys-type 35 | - name: dot-imports 36 | - name: error-return 37 | - name: error-strings 38 | - name: error-naming 39 | - name: exported 40 | - name: increment-decrement 41 | - name: var-naming 42 | - name: var-declaration 43 | - name: package-comments 44 | - name: range 45 | - name: receiver-naming 46 | - name: time-naming 47 | - name: indent-error-flow 48 | - name: errorf 49 | - name: empty-block 50 | - name: superfluous-else 51 | - name: unused-parameter 52 | - name: unreachable-code 53 | - name: redefines-builtin-id 54 | importas: 55 | no-unaliased: true 56 | alias: 57 | # Kubernetes 58 | - pkg: k8s.io/api/core/v1 59 | alias: corev1 60 | - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 61 | alias: apiextensionsv1 62 | - pkg: k8s.io/api/apps/v1 63 | alias: appsv1 64 | - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 65 | alias: metav1 66 | - pkg: k8s.io/apimachinery/pkg/util/runtime 67 | alias: utilruntime 68 | - pkg: sigs.k8s.io/controller-runtime/pkg/client 69 | alias: runtimeclient 70 | - pkg: k8s.io/apimachinery/pkg/api/errors 71 | alias: apierrors 72 | - pkg: k8s.io/apimachinery/pkg/util/errors 73 | alias: kerrors 74 | - pkg: k8s.io/client-go/kubernetes/scheme 75 | alias: clientgoscheme 76 | # Rancher AKS operator 77 | - pkg: github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1 78 | alias: aksv1 79 | - pkg: github.com/rancher/aks-operator/pkg/generated/controllers/aks.cattle.io/v1 80 | alias: akscontrollers 81 | # Core Rancher 82 | - pkg: github.com/rancher/rancher/pkg/apis/management.cattle.io/v3 83 | alias: managementv3 84 | issues: 85 | exclude-rules: 86 | - linters: 87 | - revive 88 | text: "var-naming: don't use an underscore in package name" 89 | path: 'mock(\w+)/doc.go$' 90 | exclude-dirs: 91 | - pkg/generated 92 | exclude-files: 93 | - "zz_generated_*" 94 | 
-------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | project_name: aks-operator 4 | 5 | before: 6 | hooks: 7 | - go mod tidy 8 | 9 | builds: 10 | - env: 11 | - CGO_ENABLED=0 12 | goos: 13 | - linux 14 | goarch: 15 | - amd64 16 | - arm64 17 | binary: aks-operator 18 | 19 | release: 20 | prerelease: auto 21 | 22 | changelog: 23 | sort: asc 24 | filters: 25 | exclude: 26 | - "^docs:" 27 | - "^test:" -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will be the default owners for everything in 2 | # the repo. Unless a later match takes precedence. 3 | 4 | * @rancher/highlander @rancher/infracloud-team 5 | -------------------------------------------------------------------------------- /Dockerfile.dapper: -------------------------------------------------------------------------------- 1 | FROM registry.suse.com/bci/bci-base:15.6 2 | 3 | ARG DAPPER_HOST_ARCH 4 | ENV ARCH=${DAPPER_HOST_ARCH} 5 | 6 | RUN zypper -n update && \ 7 | zypper -n install bash git binutils glibc-devel-static gcc vim less file tar gzip curl sed wget ca-certificates 8 | 9 | ENV GOLANG_ARCH_amd64=amd64 GOLANG_ARCH_arm=armv6l GOLANG_ARCH_arm64=arm64 GOLANG_ARCH=GOLANG_ARCH_${ARCH} \ 10 | GOPATH=/go CGO_ENABLED=0 PATH=/go/bin:/usr/local/go/bin:${PATH} SHELL=/bin/bash 11 | RUN curl -sLf https://go.dev/dl/go1.23.6.linux-${ARCH}.tar.gz | tar -xzf - -C /usr/local/ 12 | # workaround for https://bugzilla.suse.com/show_bug.cgi?id=1183043 13 | RUN if [ "${ARCH}" == "arm64" ]; then \ 14 | zypper -n install binutils-gold ; \ 15 | fi 16 | 17 | RUN if [ "${ARCH}" = "amd64" ]; then \ 18 | curl -sL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s v1.64.5; \ 19 | fi 20 | RUN curl -sL 
https://get.helm.sh/helm-v3.3.0-linux-${ARCH}.tar.gz | tar xvzf - -C /usr/local/bin --strip-components=1 21 | 22 | ENV DAPPER_ENV REPO TAG DRONE_TAG CROSS 23 | ENV DAPPER_SOURCE /go/src/github.com/rancher/aks-operator/ 24 | ENV DAPPER_OUTPUT ./bin ./dist 25 | ENV DAPPER_DOCKER_SOCKET true 26 | ENV DAPPER_RUN_ARGS "-v aks-operator-pkg:/go/pkg -v aks-operator-cache:/root/.cache" 27 | ENV HOME ${DAPPER_SOURCE} 28 | WORKDIR ${DAPPER_SOURCE} 29 | 30 | ENTRYPOINT ["./scripts/entry"] 31 | CMD ["ci"] 32 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGETS := $(shell ls scripts) 2 | GIT_BRANCH?=$(shell git branch --show-current) 3 | GIT_COMMIT?=$(shell git rev-parse HEAD) 4 | GIT_COMMIT_SHORT?=$(shell git rev-parse --short HEAD) 5 | GIT_TAG?=v0.0.0 6 | ifneq ($(GIT_BRANCH), main) 7 | GIT_TAG?=$(shell git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0" ) 8 | endif 9 | TAG?=${GIT_TAG}-${GIT_COMMIT_SHORT} 10 | OPERATOR_CHART?=$(shell find $(ROOT_DIR) -type f -name "rancher-aks-operator-[0-9]*.tgz" -print) 11 | CRD_CHART?=$(shell find $(ROOT_DIR) -type f -name "rancher-aks-operator-crd*.tgz" -print) 12 | CHART_VERSION?=900 # Only used in e2e to avoid downgrades from rancher 13 | REPO?=docker.io/rancher 14 | IMAGE = $(REPO)/aks-operator:$(TAG) 15 | MACHINE := rancher 16 | # Define the target platforms that can be used across the ecosystem. 
17 | # Note that what would actually be used for a given project will be 18 | # defined in TARGET_PLATFORMS, and must be a subset of the below: 19 | DEFAULT_PLATFORMS := linux/amd64,linux/arm64,darwin/arm64,darwin/amd64 20 | TARGET_PLATFORMS := linux/amd64,linux/arm64 21 | BUILDX_ARGS ?= --sbom=true --attest type=provenance,mode=max 22 | 23 | CLUSTER_NAME?="aks-operator-e2e" 24 | E2E_CONF_FILE ?= $(ROOT_DIR)/test/e2e/config/config.yaml 25 | 26 | ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) 27 | BIN_DIR := $(abspath $(ROOT_DIR)/bin) 28 | GO_INSTALL = ./scripts/go_install.sh 29 | 30 | MOCKGEN_VER := v0.4.0 31 | MOCKGEN_BIN := mockgen 32 | MOCKGEN := $(BIN_DIR)/$(MOCKGEN_BIN)-$(MOCKGEN_VER) 33 | 34 | GINKGO_VER := v2.22.2 35 | GINKGO_BIN := ginkgo 36 | GINKGO := $(BIN_DIR)/$(GINKGO_BIN)-$(GINKGO_VER) 37 | 38 | GO_APIDIFF_VER := v0.8.2 39 | GO_APIDIFF_BIN := go-apidiff 40 | GO_APIDIFF := $(BIN_DIR)/$(GO_APIDIFF_BIN)-$(GO_APIDIFF_VER) 41 | 42 | SETUP_ENVTEST_VER := v0.0.0-20211110210527-619e6b92dab9 43 | SETUP_ENVTEST_BIN := setup-envtest 44 | SETUP_ENVTEST := $(BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER) 45 | 46 | ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available 47 | KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION)) 48 | else 49 | KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION)) 50 | endif 51 | 52 | default: operator 53 | 54 | .dapper: 55 | @echo Downloading dapper 56 | @curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp 57 | @@chmod +x .dapper.tmp 58 | @./.dapper.tmp -v 59 | @mv .dapper.tmp .dapper 60 | 61 | .PHONY: generate-crd 62 | generate-crd: $(MOCKGEN) 63 | go generate main.go 64 | 65 | .PHONY: generate 66 | generate: 67 | $(MAKE) generate-go 68 | $(MAKE) generate-crd 69 | 70 | .PHONY: $(TARGETS) 71 | 
$(TARGETS): .dapper 72 | ./.dapper $@ 73 | 74 | $(MOCKGEN): 75 | GOBIN=$(BIN_DIR) $(GO_INSTALL) go.uber.org/mock/mockgen $(MOCKGEN_BIN) $(MOCKGEN_VER) 76 | 77 | $(GINKGO): 78 | GOBIN=$(BIN_DIR) $(GO_INSTALL) github.com/onsi/ginkgo/v2/ginkgo $(GINKGO_BIN) $(GINKGO_VER) 79 | 80 | $(GO_APIDIFF): 81 | GOBIN=$(BIN_DIR) $(GO_INSTALL) github.com/joelanford/go-apidiff $(GO_APIDIFF_BIN) $(GO_APIDIFF_VER) 82 | 83 | $(SETUP_ENVTEST): 84 | GOBIN=$(BIN_DIR) $(GO_INSTALL) sigs.k8s.io/controller-runtime/tools/setup-envtest $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER) 85 | 86 | .PHONY: operator 87 | operator: 88 | CGO_ENABLED=0 go build -ldflags \ 89 | "-X github.com/rancher/aks-operator/pkg/version.GitCommit=$(GIT_COMMIT) \ 90 | -X github.com/rancher/aks-operator/pkg/version.Version=$(TAG)" \ 91 | -o bin/aks-operator . 92 | 93 | .PHONY: generate-go 94 | generate-go: $(MOCKGEN) 95 | go generate ./pkg/aks/... 96 | 97 | .PHONY: test 98 | test: $(SETUP_ENVTEST) $(GINKGO) 99 | KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GINKGO) -v -r -p --trace ./pkg/... ./controller/... 
100 | 101 | .PHONY: clean 102 | clean: 103 | rm -rf build bin dist 104 | 105 | ALL_VERIFY_CHECKS = generate 106 | 107 | .PHONY: verify 108 | verify: $(addprefix verify-,$(ALL_VERIFY_CHECKS)) 109 | 110 | .PHONY: verify-generate 111 | verify-generate: generate 112 | @if !(git diff --quiet HEAD); then \ 113 | git diff; \ 114 | echo "generated files are out of date, run make generate"; exit 1; \ 115 | fi 116 | 117 | .PHONY: operator-chart 118 | operator-chart: 119 | mkdir -p $(BIN_DIR) 120 | cp -rf $(ROOT_DIR)/charts/aks-operator $(BIN_DIR)/chart 121 | sed -i -e 's/tag:.*/tag: '${TAG}'/' $(BIN_DIR)/chart/values.yaml 122 | sed -i -e 's|repository:.*|repository: '${REPO}/aks-operator'|' $(BIN_DIR)/chart/values.yaml 123 | helm package --version ${CHART_VERSION} --app-version ${GIT_TAG} -d $(BIN_DIR)/ $(BIN_DIR)/chart 124 | rm -Rf $(BIN_DIR)/chart 125 | 126 | .PHONY: crd-chart 127 | crd-chart: 128 | mkdir -p $(BIN_DIR) 129 | helm package --version ${CHART_VERSION} --app-version ${GIT_TAG} -d $(BIN_DIR)/ $(ROOT_DIR)/charts/aks-operator-crd 130 | rm -Rf $(BIN_DIR)/chart 131 | 132 | .PHONY: charts 133 | charts: 134 | $(MAKE) operator-chart 135 | $(MAKE) crd-chart 136 | 137 | .PHONY: buildx-machine 138 | buildx-machine: ## create rancher dockerbuildx machine targeting platform defined by DEFAULT_PLATFORMS 139 | @docker buildx ls | grep $(MACHINE) || \ 140 | docker buildx create --name=$(MACHINE) --platform=$(DEFAULT_PLATFORMS) 141 | 142 | .PHONY: image-build 143 | image-build: buildx-machine ## build (and load) the container image targeting the current platform. 144 | docker buildx build -f package/Dockerfile \ 145 | --builder $(MACHINE) --build-arg COMMIT=$(GIT_COMMIT) --build-arg VERSION=$(TAG) \ 146 | -t "$(IMAGE)" $(BUILD_ACTION) . 147 | @echo "Built $(IMAGE)" 148 | 149 | .PHONY: image-push 150 | image-push: buildx-machine ## build the container image targeting all platforms defined by TARGET_PLATFORMS and push to a registry. 
151 | docker buildx build -f package/Dockerfile \ 152 | --builder $(MACHINE) $(IID_FILE_FLAG) $(BUILDX_ARGS) --build-arg COMMIT=$(GIT_COMMIT) --build-arg VERSION=$(TAG) \ 153 | --platform=$(TARGET_PLATFORMS) -t "$(IMAGE)" --push . 154 | @echo "Pushed $(IMAGE)" 155 | 156 | .PHONY: setup-kind 157 | setup-kind: 158 | CLUSTER_NAME=$(CLUSTER_NAME) $(ROOT_DIR)/scripts/setup-kind-cluster.sh 159 | 160 | .PHONY: e2e-tests 161 | e2e-tests: $(GINKGO) charts 162 | export EXTERNAL_IP=`kubectl get nodes -o jsonpath='{.items[].status.addresses[?(@.type == "InternalIP")].address}'` && \ 163 | export BRIDGE_IP="172.18.0.1" && \ 164 | export CONFIG_PATH=$(E2E_CONF_FILE) && \ 165 | export OPERATOR_CHART=$(OPERATOR_CHART) && \ 166 | export CRD_CHART=$(CRD_CHART) && \ 167 | cd $(ROOT_DIR)/test && $(GINKGO) $(ONLY_DEPLOY) -r -v ./e2e 168 | 169 | .PHONY: kind-e2e-tests 170 | kind-e2e-tests: docker-build-e2e setup-kind 171 | kind load docker-image --name $(CLUSTER_NAME) ${IMAGE} 172 | $(MAKE) e2e-tests 173 | 174 | kind-deploy-operator: 175 | ONLY_DEPLOY="--label-filter=\"do-nothing\"" $(MAKE) kind-e2e-tests 176 | 177 | .PHONY: docker-build-e2e 178 | docker-build-e2e: 179 | DOCKER_BUILDKIT=1 docker build \ 180 | -f test/e2e/Dockerfile.e2e \ 181 | --build-arg "TAG=${GIT_TAG}" \ 182 | --build-arg "COMMIT=${GIT_COMMIT}" \ 183 | --build-arg "COMMITDATE=${COMMITDATE}" \ 184 | -t ${IMAGE} .
185 | 186 | .PHONY: delete-local-kind-cluster 187 | delete-local-kind-cluster: ## Delete the local kind cluster 188 | kind delete cluster --name=$(CLUSTER_NAME) 189 | 190 | APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main) 191 | 192 | .PHONY: apidiff 193 | apidiff: $(GO_APIDIFF) ## Check for API differences 194 | $(GO_APIDIFF) $(APIDIFF_OLD_COMMIT) --print-compatible 195 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Nightly e2e tests](https://github.com/rancher/aks-operator/actions/workflows/e2e-latest-rancher.yaml/badge.svg?branch=main)](https://github.com/rancher/aks-operator/actions/workflows/e2e-latest-rancher.yaml) 2 | 3 | 4 | # rancher/aks-operator 5 | 6 | AKS operator is a Kubernetes CRD controller that controls cluster provisioning in Azure Kubernetes Service using an AKSClusterConfig defined by a Custom Resource Definition. 7 | 8 | ## Build 9 | 10 | Operator binary can be built using the following command: 11 | 12 | ```bash 13 | make operator 14 | ``` 15 | 16 | ## Deploy operator from source 17 | 18 | You can use the following command to deploy a Kind cluster with Rancher manager and operator: 19 | 20 | ```bash 21 | make kind-deploy-operator 22 | ``` 23 | 24 | After this, you can also downscale operator deployment and run operator from a local binary. 25 | 26 | ## Tests 27 | 28 | Running unit tests can be done using the following command: 29 | 30 | ```bash 31 | make test 32 | ``` 33 | 34 | ### E2E 35 | 36 | We run e2e tests after every merged PR and periodically every 24 hours.
They are triggered by a [Github action](.github/workflows/e2e-latest-rancher.yaml) 37 | 38 | For running e2e set the following variables and run: 39 | 40 | ```bash 41 | export AZURE_CLIENT_ID="replace_with_your_client_id" 42 | export AZURE_CLIENT_SECRET="replace_with_client_secret" 43 | export AZURE_SUBSCRIPTION_ID="replace_with_subscription_id" 44 | make kind-e2e-tests 45 | ``` 46 | 47 | A Kind cluster will be created, and the e2e tests will be run against it. 48 | 49 | To delete the local Kind cluster once e2e tests are completed, run: 50 | 51 | ```bash 52 | make delete-local-kind-cluster 53 | ``` 54 | 55 | ## Release 56 | 57 | #### When should I release? 58 | 59 | A KEv2 operator should be released if 60 | 61 | * There have been several commits since the last release, 62 | * You need to pull in an update/bug fix/backend code to unblock UI for a feature enhancement in Rancher 63 | * The operator needs to be unRC for a Rancher release 64 | 65 | #### How do I release? 66 | 67 | Tag the latest commit on the `master` branch. For example, if latest tag is: 68 | * `v1.0.8-rc1` you should tag `v1.0.8-rc2`. 69 | * `v1.0.8` you should tag `v1.0.9-rc1`. 70 | 71 | ```bash 72 | # Get the latest upstream changes 73 | # Note: `upstream` must be the remote pointing to `git@github.com:rancher/aks-operator.git`. 74 | git pull upstream master --tags 75 | 76 | # Export the tag of the release to be cut, e.g.: 77 | export RELEASE_TAG=v1.0.8-rc2 78 | 79 | # Create tags locally 80 | git tag -s -a ${RELEASE_TAG} -m ${RELEASE_TAG} 81 | 82 | # Push tags 83 | # Note: `upstream` must be the remote pointing to `git@github.com:rancher/aks-operator.git`. 84 | git push upstream ${RELEASE_TAG} 85 | ``` 86 | 87 | After pushing the release tag, you need to run 2 Github actions. You can find them in the Actions tab of the repo: 88 | 89 | * `Update AKS operator in rancher/rancher` - This action will update the AKS operator in rancher/rancher repo. It will bump go dependencies. 
90 | * `Update AKS Operator in rancher/charts` - This action will update the AKS operator in rancher/charts repo. It will bump the chart version. 91 | 92 | #### How do I unRC? 93 | 94 | UnRC is the process of removing the rc from a KEv2 operator tag and means the released version is stable and ready for use. Release the KEv2 operator but instead of bumping the rc, remove the rc. For example, if the latest release of AKS operator is: 95 | * `v1.0.8-rc1`, release the next version without the rc which would be `v1.0.8`. 96 | * `v1.0.8`, that has no rc so release that version or `v1.0.9` if updates are available. 97 | -------------------------------------------------------------------------------- /charts/aks-operator-crd/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rancher-aks-operator-crd 3 | description: AKS Operator CustomResourceDefinitions 4 | version: 999 5 | appVersion: 999 6 | annotations: 7 | catalog.cattle.io/certified: rancher 8 | catalog.cattle.io/namespace: cattle-system 9 | catalog.cattle.io/hidden: "true" 10 | catalog.cattle.io/release-name: rancher-aks-operator-crd 11 | catalog.cattle.io/os: linux 12 | catalog.cattle.io/permits-os: linux,windows 13 | 14 | -------------------------------------------------------------------------------- /charts/aks-operator-crd/templates/crds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | helm.sh/resource-policy: keep 6 | name: aksclusterconfigs.aks.cattle.io 7 | spec: 8 | group: aks.cattle.io 9 | names: 10 | kind: AKSClusterConfig 11 | plural: aksclusterconfigs 12 | shortNames: 13 | - akscc 14 | singular: aksclusterconfig 15 | preserveUnknownFields: false 16 | scope: Namespaced 17 | versions: 18 | - name: v1 19 | schema: 20 | openAPIV3Schema: 21 | properties: 22 | spec: 23 | properties: 24 | 
authBaseUrl: 25 | nullable: true 26 | type: string 27 | authorizedIpRanges: 28 | items: 29 | nullable: true 30 | type: string 31 | nullable: true 32 | type: array 33 | azureCredentialSecret: 34 | nullable: true 35 | type: string 36 | baseUrl: 37 | nullable: true 38 | type: string 39 | clusterName: 40 | nullable: true 41 | type: string 42 | dnsPrefix: 43 | nullable: true 44 | type: string 45 | dnsServiceIp: 46 | nullable: true 47 | type: string 48 | dockerBridgeCidr: 49 | nullable: true 50 | type: string 51 | httpApplicationRouting: 52 | nullable: true 53 | type: boolean 54 | imported: 55 | type: boolean 56 | kubernetesVersion: 57 | nullable: true 58 | type: string 59 | linuxAdminUsername: 60 | nullable: true 61 | type: string 62 | loadBalancerSku: 63 | nullable: true 64 | type: string 65 | logAnalyticsWorkspaceGroup: 66 | nullable: true 67 | type: string 68 | logAnalyticsWorkspaceName: 69 | nullable: true 70 | type: string 71 | managedIdentity: 72 | nullable: true 73 | type: boolean 74 | monitoring: 75 | nullable: true 76 | type: boolean 77 | networkPlugin: 78 | nullable: true 79 | type: string 80 | networkPolicy: 81 | nullable: true 82 | type: string 83 | nodePools: 84 | items: 85 | properties: 86 | availabilityZones: 87 | items: 88 | nullable: true 89 | type: string 90 | nullable: true 91 | type: array 92 | count: 93 | nullable: true 94 | type: integer 95 | enableAutoScaling: 96 | nullable: true 97 | type: boolean 98 | maxCount: 99 | nullable: true 100 | type: integer 101 | maxPods: 102 | nullable: true 103 | type: integer 104 | maxSurge: 105 | nullable: true 106 | type: string 107 | minCount: 108 | nullable: true 109 | type: integer 110 | mode: 111 | nullable: true 112 | type: string 113 | name: 114 | nullable: true 115 | type: string 116 | nodeLabels: 117 | additionalProperties: 118 | nullable: true 119 | type: string 120 | nullable: true 121 | type: object 122 | nodeTaints: 123 | items: 124 | nullable: true 125 | type: string 126 | nullable: true 127 | type: 
array 128 | orchestratorVersion: 129 | nullable: true 130 | type: string 131 | osDiskSizeGB: 132 | nullable: true 133 | type: integer 134 | osDiskType: 135 | nullable: true 136 | type: string 137 | osType: 138 | nullable: true 139 | type: string 140 | vmSize: 141 | nullable: true 142 | type: string 143 | vnetSubnetID: 144 | nullable: true 145 | type: string 146 | type: object 147 | nullable: true 148 | type: array 149 | nodeResourceGroup: 150 | nullable: true 151 | type: string 152 | outboundType: 153 | nullable: true 154 | type: string 155 | podCidr: 156 | nullable: true 157 | type: string 158 | privateCluster: 159 | nullable: true 160 | type: boolean 161 | privateDnsZone: 162 | nullable: true 163 | type: string 164 | resourceGroup: 165 | nullable: true 166 | type: string 167 | resourceLocation: 168 | nullable: true 169 | type: string 170 | serviceCidr: 171 | nullable: true 172 | type: string 173 | sshPublicKey: 174 | nullable: true 175 | type: string 176 | subnet: 177 | nullable: true 178 | type: string 179 | tags: 180 | additionalProperties: 181 | nullable: true 182 | type: string 183 | nullable: true 184 | type: object 185 | userAssignedIdentity: 186 | nullable: true 187 | type: string 188 | virtualNetwork: 189 | nullable: true 190 | type: string 191 | virtualNetworkResourceGroup: 192 | nullable: true 193 | type: string 194 | type: object 195 | status: 196 | properties: 197 | failureMessage: 198 | nullable: true 199 | type: string 200 | phase: 201 | nullable: true 202 | type: string 203 | rbacEnabled: 204 | nullable: true 205 | type: boolean 206 | type: object 207 | type: object 208 | served: true 209 | storage: true 210 | subresources: 211 | status: {} 212 | -------------------------------------------------------------------------------- /charts/aks-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rancher-aks-operator 3 | description: A Helm chart for provisioning AKS clusters 4 
| home: https://github.com/rancher/aks-operator 5 | sources: 6 | - "https://github.com/rancher/aks-operator" 7 | version: 999 8 | appVersion: 999 9 | annotations: 10 | catalog.cattle.io/auto-install: rancher-aks-operator-crd=match 11 | catalog.cattle.io/certified: rancher 12 | catalog.cattle.io/hidden: "true" 13 | catalog.cattle.io/kube-version: ">= 1.18.0-0" 14 | catalog.cattle.io/namespace: cattle-system 15 | catalog.cattle.io/os: linux 16 | catalog.cattle.io/permits-os: linux,windows 17 | catalog.cattle.io/provides-gvr: aksclusterconfigs.aks.cattle.io/v1 18 | catalog.cattle.io/rancher-version: ">= 2.6.0-alpha" 19 | catalog.cattle.io/release-name: rancher-aks-operator 20 | catalog.cattle.io/scope: management 21 | 22 | -------------------------------------------------------------------------------- /charts/aks-operator/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | You have deployed the Rancher AKS operator 2 | Version: {{ .Chart.AppVersion }} 3 | Description: This operator provisions AKS clusters 4 | from AKSClusterConfig CRs. 
5 | -------------------------------------------------------------------------------- /charts/aks-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | 3 | {{- define "system_default_registry" -}} 4 | {{- if .Values.global.cattle.systemDefaultRegistry -}} 5 | {{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} 6 | {{- else -}} 7 | {{- "" -}} 8 | {{- end -}} 9 | {{- end -}} 10 | 11 | {{/* 12 | Windows cluster will add default taint for linux nodes, 13 | add below linux tolerations to workloads could be scheduled to those linux nodes 14 | */}} 15 | {{- define "linux-node-tolerations" -}} 16 | - key: "cattle.io/os" 17 | value: "linux" 18 | effect: "NoSchedule" 19 | operator: "Equal" 20 | {{- end -}} 21 | 22 | {{- define "linux-node-selector" -}} 23 | kubernetes.io/os: linux 24 | {{- end -}} 25 | 26 | -------------------------------------------------------------------------------- /charts/aks-operator/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: aks-operator 5 | namespace: cattle-system 6 | rules: 7 | - apiGroups: [''] 8 | resources: ['secrets'] 9 | verbs: ['get', 'list', 'create', 'watch', 'update'] 10 | - apiGroups: ['aks.cattle.io'] 11 | resources: ['aksclusterconfigs'] 12 | verbs: ['get', 'list', 'update', 'watch'] 13 | - apiGroups: ['aks.cattle.io'] 14 | resources: ['aksclusterconfigs/status'] 15 | verbs: ['update'] 16 | -------------------------------------------------------------------------------- /charts/aks-operator/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: aks-operator 5 | namespace: cattle-system 6 | roleRef: 7 | 
apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: aks-operator 10 | subjects: 11 | - kind: ServiceAccount 12 | name: aks-operator 13 | namespace: cattle-system 14 | -------------------------------------------------------------------------------- /charts/aks-operator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: aks-config-operator 5 | namespace: cattle-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | ke.cattle.io/operator: aks 11 | template: 12 | metadata: 13 | labels: 14 | ke.cattle.io/operator: aks 15 | spec: 16 | nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} 17 | {{- if .Values.nodeSelector }} 18 | {{ toYaml .Values.nodeSelector | indent 8 }} 19 | {{- end }} 20 | tolerations: {{ include "linux-node-tolerations" . | nindent 8 }} 21 | {{- if .Values.tolerations }} 22 | {{ toYaml .Values.tolerations | indent 8 }} 23 | {{- end }} 24 | serviceAccountName: aks-operator 25 | {{- if .Values.priorityClassName }} 26 | priorityClassName: "{{.Values.priorityClassName}}" 27 | {{- end }} 28 | securityContext: 29 | fsGroup: 1007 30 | runAsUser: 1007 31 | containers: 32 | - name: aks-operator 33 | image: '{{ template "system_default_registry" $ }}{{ $.Values.aksOperator.image.repository }}:{{ $.Values.aksOperator.image.tag }}' 34 | imagePullPolicy: IfNotPresent 35 | env: 36 | - name: HTTP_PROXY 37 | value: {{ .Values.httpProxy }} 38 | - name: HTTPS_PROXY 39 | value: {{ .Values.httpsProxy }} 40 | - name: NO_PROXY 41 | value: {{ .Values.noProxy }} 42 | securityContext: 43 | allowPrivilegeEscalation: false 44 | readOnlyRootFilesystem: true 45 | privileged: false 46 | capabilities: 47 | drop: 48 | - ALL 49 | {{- if .Values.additionalTrustedCAs }} 50 | # aks-operator mounts the additional CAs in two places: 51 | volumeMounts: 52 | # This directory is owned by the aks-operator user so c_rehash works 
here. 53 | - mountPath: /etc/rancher/ssl/ca-additional.pem 54 | name: tls-ca-additional-volume 55 | subPath: ca-additional.pem 56 | readOnly: true 57 | # This directory is root-owned so c_rehash doesn't work here, 58 | # but the cert is here in case update-ca-certificates is called in the future or by the OS. 59 | - mountPath: /etc/pki/trust/anchors/ca-additional.pem 60 | name: tls-ca-additional-volume 61 | subPath: ca-additional.pem 62 | readOnly: true 63 | volumes: 64 | - name: tls-ca-additional-volume 65 | secret: 66 | defaultMode: 0400 67 | secretName: tls-ca-additional 68 | {{- end }} 69 | -------------------------------------------------------------------------------- /charts/aks-operator/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | namespace: cattle-system 5 | name: aks-operator 6 | -------------------------------------------------------------------------------- /charts/aks-operator/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | cattle: 3 | systemDefaultRegistry: "" 4 | 5 | aksOperator: 6 | image: 7 | repository: rancher/aks-operator 8 | tag: v0.0.0 9 | 10 | httpProxy: "" 11 | httpsProxy: "" 12 | noProxy: "" 13 | additionalTrustedCAs: false 14 | 15 | ## Node labels for pod assignment 16 | ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ 17 | ## 18 | nodeSelector: {} 19 | ## List of node taints to tolerate (requires Kubernetes >= 1.6) 20 | tolerations: [] 21 | 22 | ## PriorityClassName assigned to deployment. 
23 | priorityClassName: "" 24 | -------------------------------------------------------------------------------- /controller/external.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/rancher/aks-operator/pkg/aks" 8 | "github.com/rancher/aks-operator/pkg/aks/services" 9 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 10 | wranglerv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 11 | "k8s.io/client-go/rest" 12 | ) 13 | 14 | // GetClusterKubeConfig returns a kubeconfig for a given cluster. This function is imported in rancher/rancher and has to stay for compatibility. 15 | func GetClusterKubeConfig(ctx context.Context, secretsCache wranglerv1.SecretCache, secretClient wranglerv1.SecretClient, spec *aksv1.AKSClusterConfigSpec) (restConfig *rest.Config, err error) { 16 | credentials, err := aks.GetSecrets(secretsCache, secretClient, spec) 17 | if err != nil { 18 | return nil, fmt.Errorf("error getting credentials secret: %w", err) 19 | } 20 | 21 | clientSecretCredential, err := aks.NewClientSecretCredential(credentials) 22 | if err != nil { 23 | return nil, fmt.Errorf("error creating client secret credential: %w", err) 24 | } 25 | 26 | clustersClient, err := services.NewManagedClustersClient(credentials.SubscriptionID, clientSecretCredential, credentials.Cloud) 27 | if err != nil { 28 | return nil, fmt.Errorf("error creating managed cluster client: %w", err) 29 | } 30 | 31 | h := Handler{ 32 | azureClients: azureClients{ 33 | clustersClient: clustersClient, 34 | }, 35 | } 36 | return h.getClusterKubeConfig(ctx, spec) 37 | } 38 | 39 | // BuildUpstreamClusterState creates an AKSClusterConfigSpec (spec for the AKS cluster state) from the existing 40 | // cluster configuration. 
41 | func BuildUpstreamClusterState(ctx context.Context, secretsCache wranglerv1.SecretCache, secretClient wranglerv1.SecretClient, spec *aksv1.AKSClusterConfigSpec) (*aksv1.AKSClusterConfigSpec, error) { 42 | credentials, err := aks.GetSecrets(secretsCache, secretClient, spec) 43 | if err != nil { 44 | return nil, fmt.Errorf("error getting credentials secret: %w", err) 45 | } 46 | 47 | clientSecretCredential, err := aks.NewClientSecretCredential(credentials) 48 | if err != nil { 49 | return nil, fmt.Errorf("error creating client secret credential: %w", err) 50 | } 51 | 52 | clustersClient, err := services.NewManagedClustersClient(credentials.SubscriptionID, clientSecretCredential, credentials.Cloud) 53 | if err != nil { 54 | return nil, fmt.Errorf("error creating managed cluster client: %w", err) 55 | } 56 | 57 | h := Handler{ 58 | secretsCache: secretsCache, 59 | secrets: secretClient, 60 | azureClients: azureClients{ 61 | clustersClient: clustersClient, 62 | }, 63 | } 64 | return h.buildUpstreamClusterState(ctx, credentials, spec) 65 | } 66 | -------------------------------------------------------------------------------- /controller/suite_test.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . 
"github.com/onsi/gomega" 9 | aksv1 "github.com/rancher/aks-operator/pkg/generated/controllers/aks.cattle.io" 10 | "github.com/rancher/aks-operator/pkg/test" 11 | "github.com/rancher/wrangler/v3/pkg/generated/controllers/core" 12 | "k8s.io/client-go/rest" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | "sigs.k8s.io/controller-runtime/pkg/envtest" 15 | logf "sigs.k8s.io/controller-runtime/pkg/log" 16 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 17 | ) 18 | 19 | var ( 20 | testEnv *envtest.Environment 21 | cfg *rest.Config 22 | cl client.Client 23 | coreFactory *core.Factory 24 | aksFactory *aksv1.Factory 25 | 26 | ctx = context.Background() 27 | ) 28 | 29 | func TestAPIs(t *testing.T) { 30 | RegisterFailHandler(Fail) 31 | RunSpecs(t, "AKS Operator Suite") 32 | } 33 | 34 | var _ = BeforeSuite(func() { 35 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 36 | 37 | By("bootstrapping test environment") 38 | var err error 39 | testEnv = &envtest.Environment{} 40 | cfg, cl, err = test.StartEnvTest(testEnv) 41 | Expect(err).NotTo(HaveOccurred()) 42 | Expect(cfg).NotTo(BeNil()) 43 | Expect(cl).NotTo(BeNil()) 44 | 45 | coreFactory, err = core.NewFactoryFromConfig(cfg) 46 | Expect(err).NotTo(HaveOccurred()) 47 | Expect(coreFactory).NotTo(BeNil()) 48 | 49 | aksFactory, err = aksv1.NewFactoryFromConfig(cfg) 50 | Expect(err).NotTo(HaveOccurred()) 51 | Expect(aksFactory).NotTo(BeNil()) 52 | }) 53 | 54 | var _ = AfterSuite(func() { 55 | By("tearing down the test environment") 56 | Expect(test.StopEnvTest(testEnv)).To(Succeed()) 57 | }) 58 | -------------------------------------------------------------------------------- /examples/create-example-udr.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: aks.cattle.io/v1 2 | kind: AKSClusterConfig 3 | metadata: 4 | name: my-cluster 5 | spec: 6 | resourceLocation: "germanywestcentral" 7 | resourceGroup: "my-group" 8 | clusterName: "my-cluster" 9 | 
baseUrl: "https://management.azure.com/" 10 | authBaseUrl: "https://login.microsoftonline.com" 11 | azureCredentialSecret: "REPLACE_WITH_K8S_SECRETS_NAME" 12 | dnsPrefix: "example-dns" 13 | privateCluster: false 14 | linuxAdminUsername: "rancher-user" 15 | loadBalancerSku: "" 16 | sshPublicKey: "REPLACE_WITH_SSH_PUBLIC_KEY" 17 | kubernetesVersion: "1.19.9" 18 | nodePools: 19 | - name: "masters" 20 | count: 1 21 | vmSize: "Standard_DS2_v2" 22 | osDiskSizeGB: 128 23 | osDiskType: "Managed" 24 | maxPods: 110 25 | mode: "System" 26 | osType: "Linux" 27 | - name: "workers" 28 | orchestratorVersion: "1.19.9" 29 | count: 6 30 | vmSize: "Standard_DS2_v2" 31 | osDiskSizeGB: 128 32 | osDiskType: "Managed" 33 | maxPods: 110 34 | mode: "User" 35 | osType: "Linux" 36 | enableAutoScaling: true 37 | minCount: 1 38 | maxCount: 6 39 | availabilityZones: [ "1", "2", "3" ] 40 | outboundType: "userDefinedRouting" 41 | -------------------------------------------------------------------------------- /examples/create-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: aks.cattle.io/v1 2 | kind: AKSClusterConfig 3 | metadata: 4 | name: my-cluster 5 | spec: 6 | resourceLocation: "germanywestcentral" 7 | resourceGroup: "my-group" 8 | clusterName: "my-cluster" 9 | baseUrl: "https://management.azure.com/" 10 | authBaseUrl: "https://login.microsoftonline.com" 11 | azureCredentialSecret: "REPLACE_WITH_K8S_SECRETS_NAME" 12 | dnsPrefix: "example-dns" 13 | privateCluster: false 14 | linuxAdminUsername: "rancher-user" 15 | loadBalancerSku: "standard" 16 | sshPublicKey: "REPLACE_WITH_SSH_PUBLIC_KEY" 17 | kubernetesVersion: "1.19.9" 18 | nodePools: 19 | - name: "masters" 20 | count: 1 21 | vmSize: "Standard_DS2_v2" 22 | osDiskSizeGB: 128 23 | osDiskType: "Managed" 24 | maxPods: 110 25 | mode: "System" 26 | osType: "Linux" 27 | - name: "workers" 28 | orchestratorVersion: "1.19.9" 29 | count: 6 30 | vmSize: "Standard_DS2_v2" 31 | osDiskSizeGB: 
128 32 | osDiskType: "Managed" 33 | maxPods: 110 34 | mode: "User" 35 | osType: "Linux" 36 | enableAutoScaling: true 37 | minCount: 1 38 | maxCount: 6 39 | availabilityZones: [ "1", "2", "3" ] 40 | outboundType: "loadBalancer" 41 | -------------------------------------------------------------------------------- /examples/import-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: aks.cattle.io/v1 2 | kind: AKSClusterConfig 3 | metadata: 4 | name: my-import-cluster 5 | spec: 6 | clusterName: my-import-cluster 7 | azureCredentialSecret: "REPLACE_WITH_K8S_SECRETS_NAME" 8 | resourceGroup: "my-group" 9 | resourceLocation: "westus" 10 | imported: true 11 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/rancher/aks-operator 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.6 6 | 7 | replace k8s.io/client-go => k8s.io/client-go v0.32.1 8 | 9 | require ( 10 | github.com/Azure/azure-sdk-for-go v68.0.0+incompatible 11 | github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.0 12 | github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.10.0 13 | github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5 v5.0.0 14 | github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights v1.2.0 15 | github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources v1.2.0 16 | github.com/Azure/go-autorest/autorest v0.11.30 17 | github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 18 | github.com/onsi/ginkgo/v2 v2.23.4 19 | github.com/onsi/gomega v1.37.0 20 | github.com/pkg/errors v0.9.1 21 | github.com/rancher-sandbox/ele-testhelpers v0.0.0-20231206161614-20a517410736 22 | github.com/rancher/lasso v0.2.2 23 | github.com/rancher/rancher/pkg/apis v0.0.0-20240821150307-952f563826f5 24 | 
github.com/rancher/wrangler-api v0.6.1-0.20200427172631-a7c2f09b783e 25 | github.com/rancher/wrangler/v3 v3.2.0-rc.3 26 | github.com/sirupsen/logrus v1.9.3 27 | github.com/stretchr/testify v1.10.0 28 | go.uber.org/mock v0.5.2 29 | golang.org/x/net v0.40.0 30 | k8s.io/api v0.32.1 31 | k8s.io/apiextensions-apiserver v0.32.1 32 | k8s.io/apimachinery v0.32.1 33 | k8s.io/apiserver v0.32.1 34 | k8s.io/client-go v12.0.0+incompatible 35 | sigs.k8s.io/controller-runtime v0.19.4 36 | sigs.k8s.io/yaml v1.4.0 37 | ) 38 | 39 | require ( 40 | github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect 41 | github.com/Azure/go-autorest v14.2.0+incompatible // indirect 42 | github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect 43 | github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect 44 | github.com/Azure/go-autorest/autorest/to v0.4.1-0.20210111195520-9fc88b15294e // indirect 45 | github.com/Azure/go-autorest/autorest/validation v0.3.2-0.20210111195520-9fc88b15294e // indirect 46 | github.com/Azure/go-autorest/logger v0.2.1 // indirect 47 | github.com/Azure/go-autorest/tracing v0.6.0 // indirect 48 | github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 // indirect 49 | github.com/beorn7/perks v1.0.1 // indirect 50 | github.com/blang/semver/v4 v4.0.0 // indirect 51 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 52 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 53 | github.com/emicklei/go-restful/v3 v3.12.1 // indirect 54 | github.com/evanphx/json-patch v5.9.11+incompatible // indirect 55 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect 56 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 57 | github.com/ghodss/yaml v1.0.0 // indirect 58 | github.com/go-logr/logr v1.4.2 // indirect 59 | github.com/go-logr/zapr v1.3.0 // indirect 60 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 61 | github.com/go-openapi/jsonreference v0.21.0 // indirect 62 | github.com/go-openapi/swag v0.23.0 // indirect 63 | 
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 64 | github.com/gogo/protobuf v1.3.2 // indirect 65 | github.com/golang-jwt/jwt/v4 v4.5.2 // indirect 66 | github.com/golang-jwt/jwt/v5 v5.2.2 // indirect 67 | github.com/golang/protobuf v1.5.4 // indirect 68 | github.com/google/gnostic-models v0.6.9 // indirect 69 | github.com/google/go-cmp v0.7.0 // indirect 70 | github.com/google/gofuzz v1.2.0 // indirect 71 | github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect 72 | github.com/google/uuid v1.6.0 // indirect 73 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 74 | github.com/josharian/intern v1.0.0 // indirect 75 | github.com/json-iterator/go v1.1.12 // indirect 76 | github.com/klauspost/compress v1.17.9 // indirect 77 | github.com/kylelemons/godebug v1.1.0 // indirect 78 | github.com/mailru/easyjson v0.7.7 // indirect 79 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 80 | github.com/modern-go/reflect2 v1.0.2 // indirect 81 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 82 | github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect 83 | github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect 84 | github.com/prometheus/client_golang v1.20.5 // indirect 85 | github.com/prometheus/client_model v0.6.1 // indirect 86 | github.com/prometheus/common v0.55.0 // indirect 87 | github.com/prometheus/procfs v0.15.1 // indirect 88 | github.com/rancher/eks-operator v1.9.1 // indirect 89 | github.com/rancher/fleet/pkg/apis v0.10.0 // indirect 90 | github.com/rancher/gke-operator v1.9.1 // indirect 91 | github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 // indirect 92 | github.com/rancher/rke v1.6.0 // indirect 93 | github.com/rancher/wrangler v1.1.1 // indirect 94 | github.com/spf13/cobra v1.8.1 // indirect 95 | github.com/spf13/pflag v1.0.5 // indirect 96 | github.com/x448/float16 v0.8.4 // indirect 97 | go.opentelemetry.io/otel v1.28.0 // 
indirect 98 | go.opentelemetry.io/otel/trace v1.28.0 // indirect 99 | go.uber.org/automaxprocs v1.6.0 // indirect 100 | go.uber.org/multierr v1.11.0 // indirect 101 | go.uber.org/zap v1.27.0 // indirect 102 | golang.org/x/crypto v0.38.0 // indirect 103 | golang.org/x/mod v0.24.0 // indirect 104 | golang.org/x/oauth2 v0.23.0 // indirect 105 | golang.org/x/sync v0.14.0 // indirect 106 | golang.org/x/sys v0.33.0 // indirect 107 | golang.org/x/term v0.32.0 // indirect 108 | golang.org/x/text v0.25.0 // indirect 109 | golang.org/x/time v0.7.0 // indirect 110 | golang.org/x/tools v0.31.0 // indirect 111 | google.golang.org/protobuf v1.36.5 // indirect 112 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 113 | gopkg.in/inf.v0 v0.9.1 // indirect 114 | gopkg.in/yaml.v2 v2.4.0 // indirect 115 | gopkg.in/yaml.v3 v3.0.1 // indirect 116 | k8s.io/code-generator v0.32.1 // indirect 117 | k8s.io/component-base v0.32.1 // indirect 118 | k8s.io/gengo v0.0.0-20250130153323-76c5745d3511 // indirect 119 | k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect 120 | k8s.io/klog/v2 v2.130.1 // indirect 121 | k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect 122 | k8s.io/kubernetes v1.30.10 // indirect 123 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect 124 | sigs.k8s.io/cli-utils v0.37.2 // indirect 125 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect 126 | sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect 127 | ) 128 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | //go:generate go run pkg/codegen/cleanup/main.go 2 | //go:generate go run pkg/codegen/main.go 3 | 4 | package main 5 | 6 | import ( 7 | "flag" 8 | 9 | "github.com/rancher/aks-operator/controller" 10 | aksv1 "github.com/rancher/aks-operator/pkg/generated/controllers/aks.cattle.io" 11 | "github.com/rancher/aks-operator/pkg/version" 12 | 
core3 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core" 13 | "github.com/rancher/wrangler/v3/pkg/kubeconfig" 14 | "github.com/rancher/wrangler/v3/pkg/signals" 15 | "github.com/rancher/wrangler/v3/pkg/start" 16 | "github.com/sirupsen/logrus" 17 | ) 18 | 19 | var ( 20 | masterURL string 21 | kubeconfigFile string 22 | debug bool 23 | ) 24 | 25 | func init() { 26 | flag.StringVar(&kubeconfigFile, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") 27 | flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") 28 | flag.BoolVar(&debug, "debug", false, "Variable to set log level to debug; default is false") 29 | flag.Parse() 30 | } 31 | 32 | func main() { 33 | // set up signals so we handle the first shutdown signal gracefully 34 | ctx := signals.SetupSignalContext() 35 | 36 | if debug { 37 | logrus.SetLevel(logrus.DebugLevel) 38 | logrus.Debugf("Loglevel set to [%v]", logrus.DebugLevel) 39 | } 40 | logrus.Infof("Starting aks-operator (version: %s, commit: %s)", version.Version, version.GitCommit) 41 | 42 | // This will load the kubeconfig file in a style the same as kubectl 43 | cfg, err := kubeconfig.GetNonInteractiveClientConfig(kubeconfigFile).ClientConfig() 44 | if err != nil { 45 | logrus.Fatalf("Error building kubeconfig: %s", err.Error()) 46 | } 47 | 48 | // core 49 | core, err := core3.NewFactoryFromConfig(cfg) 50 | if err != nil { 51 | logrus.Fatalf("Error building core factory: %s", err.Error()) 52 | } 53 | 54 | // Generated sample controller 55 | aks, err := aksv1.NewFactoryFromConfig(cfg) 56 | if err != nil { 57 | logrus.Fatalf("Error building aks factory: %s", err.Error()) 58 | } 59 | 60 | // The typical pattern is to build all your controller/clients then just pass to each handler 61 | // the bare minimum of what they need. This will eventually help with writing tests. 
So 62 | // don't pass in something like kubeClient, apps, or sample 63 | controller.Register(ctx, 64 | core.Core().V1().Secret(), 65 | aks.Aks().V1().AKSClusterConfig()) 66 | 67 | // Start all the controllers 68 | if err := start.All(ctx, 2, aks, core); err != nil { 69 | logrus.Fatalf("Error starting: %s", err.Error()) 70 | } 71 | 72 | <-ctx.Done() 73 | } 74 | -------------------------------------------------------------------------------- /package/Dockerfile: -------------------------------------------------------------------------------- 1 | # Image that provides cross compilation tooling. 2 | FROM --platform=$BUILDPLATFORM rancher/mirrored-tonistiigi-xx:1.5.0 AS xx 3 | 4 | FROM registry.suse.com/bci/bci-base:15.6 AS base 5 | RUN sed -i 's/^CREATE_MAIL_SPOOL=yes/CREATE_MAIL_SPOOL=no/' /etc/default/useradd 6 | RUN useradd --uid 1007 aks-operator 7 | 8 | FROM --platform=$BUILDPLATFORM registry.suse.com/bci/golang:1.23 AS builder 9 | 10 | WORKDIR /app 11 | COPY go.mod go.sum ./ 12 | RUN go mod download && go mod verify 13 | 14 | COPY ./controller ./controller 15 | COPY ./pkg ./pkg 16 | COPY ./main.go ./main.go 17 | 18 | # Copy xx scripts to your build stage 19 | COPY --from=xx / / 20 | 21 | ARG TARGETPLATFORM 22 | ARG COMMIT 23 | ARG VERSION 24 | ENV CGO_ENABLED=0 25 | RUN xx-go build -ldflags \ 26 | "-X github.com/rancher/aks-operator/pkg/version.GitCommit=${COMMIT} \ 27 | -X github.com/rancher/aks-operator/pkg/version.Version=${VERSION}" \ 28 | -o /aks-operator && \ 29 | xx-verify /aks-operator 30 | 31 | FROM registry.suse.com/bci/bci-micro:15.6 32 | COPY --from=base /etc/passwd /etc/passwd 33 | COPY --from=base /etc/shadow /etc/shadow 34 | COPY --from=builder /aks-operator /usr/bin/aks-operator 35 | 36 | RUN rm -rf /tmp/* /var/tmp/* /usr/share/doc/packages/* 37 | 38 | ENV KUBECONFIG="/home/aks-operator/.kube/config" 39 | ENV SSL_CERT_DIR="/etc/rancher/ssl" 40 | 41 | COPY package/entrypoint.sh /usr/bin 42 | RUN chmod +x /usr/bin/entrypoint.sh 43 | 44 | RUN mkdir 
-p /etc/rancher/ssl && \ 45 | chown -R aks-operator /etc/rancher/ssl 46 | 47 | USER 1007 48 | ENTRYPOINT ["entrypoint.sh"] 49 | -------------------------------------------------------------------------------- /package/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -x "$(command -v c_rehash)" ]; then 5 | # c_rehash is run here instead of update-ca-certificates because the latter requires root privileges 6 | # and the aks-operator container is run as non-root user. 7 | c_rehash 8 | fi 9 | aks-operator -------------------------------------------------------------------------------- /pkg/aks/check.go: -------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "context" 5 | "crypto/sha256" 6 | "fmt" 7 | 8 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" 9 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights" 10 | "github.com/rancher/aks-operator/pkg/aks/services" 11 | "github.com/sirupsen/logrus" 12 | ) 13 | 14 | const ( 15 | workspaceLength = 63 16 | workspaceNameLength = 46 17 | ) 18 | 19 | // Please keep in sync with 20 | // https://github.com/Azure/azure-cli/blob/release/src/azure-cli/azure/cli/command_modules/acs/custom.py#L3091 21 | var locationToOmsRegionCodeMap = map[string]string{ 22 | "australiasoutheast": "ASE", 23 | "australiaeast": "EAU", 24 | "australiacentral": "CAU", 25 | "canadacentral": "CCA", 26 | "centralindia": "CIN", 27 | "centralus": "CUS", 28 | "eastasia": "EA", 29 | "eastus": "EUS", 30 | "eastus2": "EUS2", 31 | "eastus2euap": "EAP", 32 | "francecentral": "PAR", 33 | "japaneast": "EJP", 34 | "koreacentral": "SE", 35 | "northeurope": "NEU", 36 | "southcentralus": "SCUS", 37 | "southeastasia": "SEA", 38 | "uksouth": "SUK", 39 | "usgovvirginia": "USGV", 40 | "westcentralus": "EUS", 41 | "westeurope": "WEU", 42 | "westus": "WUS", 43 | "westus2": "WUS2", 44 
| "brazilsouth": "CQ", 45 | "brazilsoutheast": "BRSE", 46 | "norwayeast": "NOE", 47 | "southafricanorth": "JNB", 48 | "northcentralus": "NCUS", 49 | "uaenorth": "DXB", 50 | "germanywestcentral": "DEWC", 51 | "ukwest": "WUK", 52 | "switzerlandnorth": "CHN", 53 | "switzerlandwest": "CHW", 54 | "uaecentral": "AUH", 55 | // mapping for azure china cloud 56 | "chinaeast": "EAST2", 57 | "chinaeast2": "EAST2", 58 | "chinanorth": "EAST2", 59 | "chinanorth2": "EAST2", 60 | } 61 | 62 | // Please keep in sync with 63 | // https://github.com/Azure/azure-cli/blob/release/src/azure-cli/azure/cli/command_modules/acs/custom.py#L3126 64 | var regionToOmsRegionMap = map[string]string{ 65 | "australiacentral": "australiacentral", 66 | "australiacentral2": "australiacentral", 67 | "australiaeast": "australiaeast", 68 | "australiasoutheast": "australiasoutheast", 69 | "brazilsouth": "brazilsouth", 70 | "canadacentral": "canadacentral", 71 | "canadaeast": "canadacentral", 72 | "centralus": "centralus", 73 | "centralindia": "centralindia", 74 | "eastasia": "eastasia", 75 | "eastus": "eastus", 76 | "eastus2": "eastus2", 77 | "francecentral": "francecentral", 78 | "francesouth": "francecentral", 79 | "japaneast": "japaneast", 80 | "japanwest": "japaneast", 81 | "koreacentral": "koreacentral", 82 | "koreasouth": "koreacentral", 83 | "northcentralus": "northcentralus", 84 | "northeurope": "northeurope", 85 | "southafricanorth": "southafricanorth", 86 | "southafricawest": "southafricanorth", 87 | "southcentralus": "southcentralus", 88 | "southeastasia": "southeastasia", 89 | "southindia": "centralindia", 90 | "uksouth": "uksouth", 91 | "ukwest": "ukwest", 92 | "westcentralus": "eastus", 93 | "westeurope": "westeurope", 94 | "westindia": "centralindia", 95 | "westus": "westus", 96 | "westus2": "westus2", 97 | "norwayeast": "norwayeast", 98 | "norwaywest": "norwayeast", 99 | "switzerlandnorth": "switzerlandnorth", 100 | "switzerlandwest": "switzerlandwest", 101 | "uaenorth": "uaenorth", 102 | 
"germanywestcentral": "germanywestcentral", 103 | "germanynorth": "germanywestcentral", 104 | "uaecentral": "uaecentral", 105 | "eastus2euap": "eastus2euap", 106 | "brazilsoutheast": "brazilsoutheast", 107 | // mapping for azure china cloud 108 | "chinaeast": "chinaeast2", 109 | "chinaeast2": "chinaeast2", 110 | "chinanorth": "chinaeast2", 111 | "chinanorth2": "chinaeast2", 112 | } 113 | 114 | func CheckLogAnalyticsWorkspaceForMonitoring(ctx context.Context, client services.WorkplacesClientInterface, 115 | location string, group string, wsg string, wsn string) (workspaceID string, err error) { 116 | workspaceRegion, ok := regionToOmsRegionMap[location] 117 | if !ok { 118 | return "", fmt.Errorf("region %s not supported for Log Analytics workspace", location) 119 | } 120 | 121 | workspaceRegionCode, ok := locationToOmsRegionCodeMap[workspaceRegion] 122 | if !ok { 123 | return "", fmt.Errorf("region %s not supported for Log Analytics workspace", workspaceRegion) 124 | } 125 | 126 | workspaceResourceGroup := wsg 127 | if workspaceResourceGroup == "" { 128 | workspaceResourceGroup = group 129 | } 130 | 131 | workspaceName := wsn 132 | if workspaceName == "" { 133 | workspaceName = fmt.Sprintf("%s-%s", group, workspaceRegionCode) 134 | } 135 | 136 | // workspaceName string length can be only 63 137 | if len(workspaceName) > workspaceLength { 138 | workspaceName = generateUniqueLogWorkspace(workspaceName) 139 | } 140 | 141 | if gotRet, gotErr := client.Get(ctx, workspaceResourceGroup, workspaceName, nil); gotErr == nil { 142 | return *gotRet.ID, nil 143 | } 144 | 145 | logrus.Infof("Create Azure Log Analytics Workspace %q on Resource Group %q", workspaceName, workspaceResourceGroup) 146 | 147 | poller, err := client.BeginCreateOrUpdate(ctx, workspaceResourceGroup, workspaceName, armoperationalinsights.Workspace{ 148 | Location: to.Ptr(workspaceRegion), 149 | Properties: &armoperationalinsights.WorkspaceProperties{ 150 | SKU: &armoperationalinsights.WorkspaceSKU{ 151 | 
Name: to.Ptr(armoperationalinsights.WorkspaceSKUNameEnumStandalone), 152 | }, 153 | }, 154 | }, nil) 155 | if err != nil { 156 | return "", err 157 | } 158 | 159 | resp, err := poller.PollUntilDone(ctx, nil) 160 | if err != nil { 161 | return "", err 162 | } 163 | workspaceID = *resp.ID 164 | return workspaceID, nil 165 | } 166 | 167 | func generateUniqueLogWorkspace(workspaceName string) string { 168 | if len(workspaceName) < workspaceNameLength { 169 | return workspaceName 170 | } 171 | s := workspaceName[0:workspaceNameLength] 172 | h := sha256.New() 173 | h.Write([]byte(workspaceName)) 174 | hexHash := h.Sum(nil) 175 | shaString := fmt.Sprintf("%x", hexHash) 176 | return fmt.Sprintf("%s-%s", s, shaString[0:16]) 177 | } 178 | 179 | var aksRegionsWithAzSupport = map[string]bool{ 180 | "australiaeast": true, 181 | "brazilsouth": true, 182 | "canadacentral": true, 183 | "centralindia": true, 184 | "chinanorth3": true, 185 | "centralus": true, 186 | "eastasia": true, 187 | "eastus": true, 188 | "eastus2": true, 189 | "eastus2euap": true, 190 | "francecentral": true, 191 | "germanywestcentral": true, 192 | "israelcentral": true, 193 | "italynorth": true, 194 | "japaneast": true, 195 | "koreacentral": true, 196 | "mexicocentral": true, 197 | "newzealandnorth": true, 198 | "northeurope": true, 199 | "norwayeast": true, 200 | "polandcentral": true, 201 | "qatarcentral": true, 202 | "southafricanorth": true, 203 | "southcentralus": true, 204 | "southeastasia": true, 205 | "spaincentral": true, 206 | "swedencentral": true, 207 | "switzerlandnorth": true, 208 | "uaenorth": true, 209 | "uksouth": true, 210 | "westeurope": true, 211 | "westus2": true, 212 | "westus3": true, 213 | } 214 | 215 | func CheckAvailabilityZonesSupport(location string) bool { 216 | return aksRegionsWithAzSupport[location] 217 | } 218 | -------------------------------------------------------------------------------- /pkg/aks/check_test.go: 
-------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" 8 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights" 9 | . "github.com/onsi/ginkgo/v2" 10 | . "github.com/onsi/gomega" 11 | "github.com/rancher/aks-operator/pkg/aks/services/mock_services" 12 | "github.com/stretchr/testify/assert" 13 | "go.uber.org/mock/gomock" 14 | ) 15 | 16 | func Test_generateUniqueLogWorkspace(t *testing.T) { 17 | tests := []struct { 18 | name string 19 | workspaceName string 20 | want string 21 | }{ 22 | { 23 | name: "basic test", 24 | workspaceName: "ThisIsAValidInputklasjdfkljasjgireqahtawjsfklakjghrehtuirqhjfhwjkdfhjkawhfdjkhafvjkahg", 25 | want: "ThisIsAValidInputklasjdfkljasjgireqahtawjsfkla-fb8fb22278d8eb98", 26 | }, 27 | } 28 | for _, tt := range tests { 29 | got := generateUniqueLogWorkspace(tt.workspaceName) 30 | assert.Equal(t, tt.want, got) 31 | assert.Len(t, got, 63) 32 | } 33 | } 34 | 35 | var _ = Describe("CheckLogAnalyticsWorkspaceForMonitoring", func() { 36 | var ( 37 | workplacesClientMock *mock_services.MockWorkplacesClientInterface 38 | pollerMock *mock_services.MockPoller[armoperationalinsights.WorkspacesClientCreateOrUpdateResponse] 39 | mockController *gomock.Controller 40 | ) 41 | 42 | BeforeEach(func() { 43 | mockController = gomock.NewController(GinkgoT()) 44 | workplacesClientMock = mock_services.NewMockWorkplacesClientInterface(mockController) 45 | pollerMock = mock_services.NewMockPoller[armoperationalinsights.WorkspacesClientCreateOrUpdateResponse](mockController) 46 | }) 47 | 48 | AfterEach(func() { 49 | mockController.Finish() 50 | }) 51 | 52 | It("should return workspaceID if workspace already exists", func() { 53 | workspaceName := "workspaceName" 54 | workspaceResourceGroup := "resourcegroup" 55 | id := "workspaceID" 56 | workplacesClientMock.EXPECT().Get(ctx, 
workspaceResourceGroup, workspaceName, nil).Return( 57 | armoperationalinsights.WorkspacesClientGetResponse{ 58 | Workspace: armoperationalinsights.Workspace{ 59 | ID: &id, 60 | }, 61 | }, nil) 62 | workspaceID, err := CheckLogAnalyticsWorkspaceForMonitoring(ctx, 63 | workplacesClientMock, 64 | "eastus", workspaceResourceGroup, "", workspaceName) 65 | Expect(err).NotTo(HaveOccurred()) 66 | Expect(workspaceID).To(Equal(id)) 67 | }) 68 | 69 | It("should create workspace if it doesn't exist", func() { 70 | workspaceName := "workspaceName" 71 | workspaceResourceGroup := "resourcegroup" 72 | id := "workspaceID" 73 | skuName := armoperationalinsights.WorkspaceSKUNameEnumStandalone 74 | workplacesClientMock.EXPECT().Get(ctx, workspaceResourceGroup, workspaceName, nil).Return(armoperationalinsights.WorkspacesClientGetResponse{}, errors.New("not found")) 75 | workplacesClientMock.EXPECT().BeginCreateOrUpdate(ctx, workspaceResourceGroup, workspaceName, 76 | armoperationalinsights.Workspace{ 77 | Location: to.Ptr("eastus"), 78 | Properties: &armoperationalinsights.WorkspaceProperties{ 79 | SKU: &armoperationalinsights.WorkspaceSKU{ 80 | Name: &skuName, 81 | }, 82 | }, 83 | }, 84 | nil, 85 | ).Return(pollerMock, nil) 86 | pollerMock.EXPECT().PollUntilDone(ctx, nil).Return(armoperationalinsights.WorkspacesClientCreateOrUpdateResponse{ 87 | Workspace: armoperationalinsights.Workspace{ 88 | ID: to.Ptr(id), 89 | }, 90 | }, nil) 91 | 92 | workspaceID, err := CheckLogAnalyticsWorkspaceForMonitoring(ctx, 93 | workplacesClientMock, 94 | "eastus", workspaceResourceGroup, "", workspaceName) 95 | 96 | Expect(err).NotTo(HaveOccurred()) 97 | Expect(workspaceID).To(Equal(id)) 98 | }) 99 | 100 | It("should fail if CreateOrUpdate returns error", func() { 101 | workplacesClientMock.EXPECT().Get(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(armoperationalinsights.WorkspacesClientGetResponse{}, errors.New("not found")) 102 | workplacesClientMock.EXPECT().BeginCreateOrUpdate(ctx, 
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(pollerMock, errors.New("error")) 103 | 104 | _, err := CheckLogAnalyticsWorkspaceForMonitoring(ctx, 105 | workplacesClientMock, 106 | "eastus", "workspaceResourceGroup", "", "workspaceName") 107 | 108 | Expect(err).To(HaveOccurred()) 109 | }) 110 | 111 | It("should fail if PollUntilDone returns error", func() { 112 | workplacesClientMock.EXPECT().Get(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(armoperationalinsights.WorkspacesClientGetResponse{}, errors.New("not found")) 113 | workplacesClientMock.EXPECT().BeginCreateOrUpdate(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(pollerMock, nil) 114 | pollerMock.EXPECT().PollUntilDone(ctx, nil).Return(armoperationalinsights.WorkspacesClientCreateOrUpdateResponse{}, errors.New("error")) 115 | 116 | _, err := CheckLogAnalyticsWorkspaceForMonitoring(ctx, 117 | workplacesClientMock, 118 | "eastus", "workspaceResourceGroup", "", "workspaceName") 119 | 120 | Expect(err).To(HaveOccurred()) 121 | }) 122 | }) 123 | 124 | var _ = Describe("CheckAvailabilityZonesSupport", func() { 125 | var ( 126 | locationWithAZ = "eastus" 127 | locationWithoutAZ = "westus" 128 | ) 129 | 130 | It("should success for region with availability zones", func() { 131 | result := CheckAvailabilityZonesSupport(locationWithAZ) 132 | Expect(result).To(BeTrue()) 133 | }) 134 | 135 | It("should fail for region with availability zones", func() { 136 | result := CheckAvailabilityZonesSupport(locationWithoutAZ) 137 | Expect(result).To(BeFalse()) 138 | }) 139 | }) 140 | -------------------------------------------------------------------------------- /pkg/aks/client.go: -------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "regexp" 8 | "time" 9 | 10 | "github.com/Azure/azure-sdk-for-go/sdk/azcore" 11 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" 12 | 
"github.com/Azure/azure-sdk-for-go/sdk/azidentity" 13 | "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-11-01/subscriptions" 14 | "github.com/Azure/go-autorest/autorest/azure" 15 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 16 | "github.com/rancher/aks-operator/pkg/utils" 17 | wranglerv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 18 | "github.com/sirupsen/logrus" 19 | corev1 "k8s.io/api/core/v1" 20 | apierrors "k8s.io/apimachinery/pkg/api/errors" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | const ( 25 | tenantIDAnnotation = "cluster.management.cattle.io/azure-tenant-id" 26 | tenantIDTimestampAnnotation = "cluster.management.cattle.io/azure-tenant-id-created-at" 27 | tenantIDTimeout = time.Hour 28 | ) 29 | 30 | const ( 31 | defaultClientPollingDelay = time.Second * 5 32 | findTenantIDTimeout = time.Second * 5 33 | ) 34 | 35 | type Credentials struct { 36 | AuthBaseURL *string 37 | BaseURL *string 38 | SubscriptionID string 39 | TenantID string 40 | ClientID string 41 | ClientSecret string 42 | Cloud cloud.Configuration 43 | } 44 | 45 | func NewClientSecretCredential(cred *Credentials) (*azidentity.ClientSecretCredential, error) { 46 | return azidentity.NewClientSecretCredential(cred.TenantID, cred.ClientID, cred.ClientSecret, &azidentity.ClientSecretCredentialOptions{ 47 | ClientOptions: azcore.ClientOptions{ 48 | Cloud: cred.Cloud, 49 | }, 50 | }) 51 | } 52 | 53 | func GetSecrets(_ wranglerv1.SecretCache, secretClient wranglerv1.SecretClient, spec *aksv1.AKSClusterConfigSpec) (*Credentials, error) { 54 | var cred Credentials 55 | 56 | if spec.AzureCredentialSecret == "" { 57 | return nil, fmt.Errorf("secret name not provided") 58 | } 59 | 60 | ns, id := utils.ParseSecretName(spec.AzureCredentialSecret) 61 | secret, err := secretClient.Get(ns, id, metav1.GetOptions{}) 62 | if err != nil { 63 | return nil, fmt.Errorf("couldn't find secret [%s] in namespace [%s]", id, ns) 64 | } 65 | 
66 | tenantIDBytes := secret.Data["azurecredentialConfig-tenantId"] 67 | subscriptionIDBytes := secret.Data["azurecredentialConfig-subscriptionId"] 68 | clientIDBytes := secret.Data["azurecredentialConfig-clientId"] 69 | clientSecretBytes := secret.Data["azurecredentialConfig-clientSecret"] 70 | clientEnvironment := "" 71 | if secret.Data["azurecredentialConfig-environment"] != nil { 72 | clientEnvironment = string(secret.Data["azurecredentialConfig-environment"]) 73 | } 74 | cloud, env := GetEnvironment(clientEnvironment) 75 | 76 | cannotBeNilError := "field [azurecredentialConfig-%s] must be provided in cloud credential" 77 | if subscriptionIDBytes == nil { 78 | return nil, fmt.Errorf(cannotBeNilError, "subscriptionId") 79 | } 80 | if clientIDBytes == nil { 81 | return nil, fmt.Errorf(cannotBeNilError, "clientId") 82 | } 83 | if clientSecretBytes == nil { 84 | return nil, fmt.Errorf(cannotBeNilError, "clientSecret") 85 | } 86 | 87 | cred.TenantID = string(tenantIDBytes) 88 | cred.SubscriptionID = string(subscriptionIDBytes) 89 | cred.ClientID = string(clientIDBytes) 90 | cred.ClientSecret = string(clientSecretBytes) 91 | cred.Cloud = cloud 92 | cred.AuthBaseURL = &env.ActiveDirectoryEndpoint 93 | cred.BaseURL = &env.ResourceManagerEndpoint 94 | 95 | if cred.TenantID == "" { 96 | cred.TenantID, err = GetCachedTenantID(secretClient, cred.SubscriptionID, secret) 97 | if err != nil { 98 | return nil, err 99 | } 100 | } 101 | 102 | return &cred, nil 103 | } 104 | 105 | type secretClient interface { 106 | Update(*corev1.Secret) (*corev1.Secret, error) 107 | } 108 | 109 | func GetCachedTenantID(secretClient secretClient, subscriptionID string, secret *corev1.Secret) (string, error) { 110 | annotations := secret.GetAnnotations() 111 | if annotations == nil { 112 | annotations = map[string]string{} 113 | } 114 | tenantAnno, timestamp := annotations[tenantIDAnnotation], annotations[tenantIDTimestampAnnotation] 115 | if tenantAnno != "" && timestamp != "" { 116 | 
parsedTime, err := time.Parse(time.RFC3339, timestamp) 117 | if err != nil { 118 | return "", err 119 | } 120 | if parsedTime.Add(tenantIDTimeout).After(time.Now().UTC()) { 121 | return tenantAnno, nil 122 | } 123 | } 124 | ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) 125 | defer cancel() 126 | logrus.Debugf("Retrieving tenant ID from Azure public cloud") 127 | 128 | clientEnvironment := "" 129 | if secret.Data["azurecredentialConfig-environment"] != nil { 130 | clientEnvironment = string(secret.Data["azurecredentialConfig-environment"]) 131 | } 132 | _, env := GetEnvironment(clientEnvironment) 133 | 134 | tenantID, err := FindTenantID(ctx, env, subscriptionID) 135 | if err != nil { 136 | return "", err 137 | } 138 | annotations[tenantIDAnnotation] = tenantID 139 | annotations[tenantIDTimestampAnnotation] = time.Now().UTC().Format(time.RFC3339) 140 | secret.Annotations = annotations 141 | _, err = secretClient.Update(secret) 142 | if apierrors.IsConflict(err) { 143 | // Ignore errors when updating the secret object. If the secret cannot be updated 144 | // (perhaps due to a conflict error), the tenant ID will be re-fetched on the next reconcile loop. 145 | logrus.Debugf("Encountered error while updating secret, ignoring: %v", err) 146 | return tenantID, nil 147 | } 148 | return tenantID, err 149 | } 150 | 151 | func GetEnvironment(env string) (cloud.Configuration, azure.Environment) { 152 | switch env { 153 | case "AzureChinaCloud": 154 | return cloud.AzureChina, azure.ChinaCloud 155 | case "AzureUSGovernmentCloud": 156 | return cloud.AzureGovernment, azure.USGovernmentCloud 157 | default: 158 | return cloud.AzurePublic, azure.PublicCloud 159 | } 160 | } 161 | 162 | // This function is used to create a new SubscriptionsClient with the given base URI. 163 | // It is used to make unauthenticated requests to the Azure Resource Manager endpoint. 
164 | func NewSubscriptionsClient(baseURI string) subscriptions.Client { 165 | c := subscriptions.NewClientWithBaseURI(baseURI) // used only for unauthenticated requests for generic subs IDs 166 | c.Client.UserAgent += fmt.Sprintf(";rancher-aks-operator") 167 | c.RequestInspector = utils.RequestWithInspection() 168 | c.ResponseInspector = utils.ResponseWithInspection() 169 | c.PollingDelay = defaultClientPollingDelay 170 | return c 171 | } 172 | 173 | // This function is used to find the tenant ID for the subscription ID. It will send an unauthenticated request to 174 | // the Azure Resource Manager endpoint to get the tenant ID from the WWW-Authenticate header. 175 | // Example header: 176 | // 177 | // Bearer authorization_uri="https://login.windows.net/996fe9d1-6171-40aa-945b-4c64b63bf655", 178 | // error="invalid_token", error_description="The authentication failed because of missing 'Authorization' header." 179 | func FindTenantID(ctx context.Context, env azure.Environment, subscriptionID string) (string, error) { 180 | goCtx, cancel := context.WithTimeout(ctx, findTenantIDTimeout) 181 | defer cancel() 182 | const hdrKey = "WWW-Authenticate" 183 | c := NewSubscriptionsClient(env.ResourceManagerEndpoint) 184 | 185 | // we expect this request to fail (err != nil), but we are only interested 186 | // in headers, so surface the error if the Response is not present (i.e. 
187 | // network error etc) 188 | subs, err := c.Get(goCtx, subscriptionID) 189 | if subs.Response.Response == nil { 190 | return "", fmt.Errorf("Request failed: %v", err) 191 | } 192 | 193 | // Expecting 401 StatusUnauthorized here, just read the header 194 | if subs.StatusCode != http.StatusUnauthorized { 195 | return "", fmt.Errorf("unexpected response from Get Subscription: %v", err) 196 | } 197 | hdr := subs.Header.Get(hdrKey) 198 | if hdr == "" { 199 | return "", fmt.Errorf("header %v not found in Get Subscription response", hdrKey) 200 | } 201 | 202 | // Example value for hdr: 203 | // Bearer authorization_uri="https://login.windows.net/996fe9d1-6171-40aa-945b-4c64b63bf655", error="invalid_token", error_description="The authentication failed because of missing 'Authorization' header." 204 | r := regexp.MustCompile(`authorization_uri=".*/([0-9a-f\-]+)"`) 205 | m := r.FindStringSubmatch(hdr) 206 | if m == nil { 207 | return "", fmt.Errorf("could not find the tenant ID in header: %s %q", hdrKey, hdr) 208 | } 209 | return m[1], nil 210 | } 211 | -------------------------------------------------------------------------------- /pkg/aks/convert.go: -------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" 5 | ) 6 | 7 | // String returns a string value for the passed string pointer. It returns the empty string if the 8 | // pointer is nil. 9 | func String(s *string) string { 10 | if s != nil { 11 | return *s 12 | } 13 | return "" 14 | } 15 | 16 | // Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. 17 | func Bool(b *bool) bool { 18 | if b != nil { 19 | return *b 20 | } 21 | return false 22 | } 23 | 24 | // StringSlice returns a string slice value for the passed string slice pointer. It returns a nil 25 | // slice if the pointer is nil. 
26 | func StringSlice(s *[]string) []string { 27 | if s != nil { 28 | return *s 29 | } 30 | return nil 31 | } 32 | 33 | // StringMapPtr returns a map of string pointers built from the passed map of strings. 34 | func StringMapPtr(ms map[string]string) map[string]*string { 35 | msp := make(map[string]*string, len(ms)) 36 | for k, s := range ms { 37 | msp[k] = to.Ptr(s) 38 | } 39 | return msp 40 | } 41 | 42 | // StringMap returns a map of strings built from the map of string pointers. The empty string is 43 | // used for nil pointers. 44 | func StringMap(msp map[string]*string) map[string]string { 45 | ms := make(map[string]string, len(msp)) 46 | for k, sp := range msp { 47 | if sp != nil { 48 | ms[k] = *sp 49 | } else { 50 | ms[k] = "" 51 | } 52 | } 53 | return ms 54 | } 55 | 56 | // Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. 57 | func Int32(i *int32) int32 { 58 | if i != nil { 59 | return *i 60 | } 61 | return 0 62 | } 63 | -------------------------------------------------------------------------------- /pkg/aks/delete.go: -------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/aks-operator/pkg/aks/services" 7 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | // RemoveCluster Delete AKS managed Kubernetes cluster 12 | func RemoveCluster(ctx context.Context, clusterClient services.ManagedClustersClientInterface, spec *aksv1.AKSClusterConfigSpec) error { 13 | poller, err := clusterClient.BeginDelete(ctx, spec.ResourceGroup, spec.ClusterName, nil) 14 | if err != nil { 15 | return err 16 | } 17 | 18 | resp, err := poller.PollUntilDone(ctx, nil) 19 | if err != nil { 20 | logrus.Errorf("can't get the AKS cluster deletion response: %v", err) 21 | return err 22 | } 23 | 24 | logrus.Infof("Cluster %v removed successfully", spec.ClusterName) 25 | 
logrus.Debugf("Cluster removal status %v", resp) 26 | 27 | return nil 28 | } 29 | 30 | // RemoveAgentPool Delete AKS Agent Pool 31 | func RemoveAgentPool(ctx context.Context, agentPoolClient services.AgentPoolsClientInterface, spec *aksv1.AKSClusterConfigSpec, np *aksv1.AKSNodePool) error { 32 | _, err := agentPoolClient.BeginDelete(ctx, spec.ResourceGroup, spec.ClusterName, *np.Name) 33 | 34 | return err 35 | } 36 | -------------------------------------------------------------------------------- /pkg/aks/delete_test.go: -------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" 5 | . "github.com/onsi/ginkgo/v2" 6 | . "github.com/onsi/gomega" 7 | "github.com/rancher/aks-operator/pkg/aks/services/mock_services" 8 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 9 | "go.uber.org/mock/gomock" 10 | ) 11 | 12 | var _ = Describe("RemoveCluster", func() { 13 | var ( 14 | mockController *gomock.Controller 15 | clusterClientMock *mock_services.MockManagedClustersClientInterface 16 | pollerMock *mock_services.MockPoller[armcontainerservice.ManagedClustersClientDeleteResponse] 17 | clusterSpec *aksv1.AKSClusterConfigSpec 18 | ) 19 | 20 | BeforeEach(func() { 21 | mockController = gomock.NewController(GinkgoT()) 22 | clusterClientMock = mock_services.NewMockManagedClustersClientInterface(mockController) 23 | pollerMock = mock_services.NewMockPoller[armcontainerservice.ManagedClustersClientDeleteResponse](mockController) 24 | clusterSpec = &aksv1.AKSClusterConfigSpec{ 25 | ResourceGroup: "resourcegroup", 26 | ClusterName: "clustername", 27 | } 28 | }) 29 | 30 | AfterEach(func() { 31 | mockController.Finish() 32 | }) 33 | 34 | It("should successfully delete cluster", func() { 35 | clusterClientMock.EXPECT().BeginDelete(ctx, clusterSpec.ResourceGroup, clusterSpec.ClusterName, nil).Return(pollerMock, nil) 36 | 
pollerMock.EXPECT().PollUntilDone(ctx, nil).Return(armcontainerservice.ManagedClustersClientDeleteResponse{}, nil) 37 | Expect(RemoveCluster(ctx, clusterClientMock, clusterSpec)).To(Succeed()) 38 | }) 39 | }) 40 | -------------------------------------------------------------------------------- /pkg/aks/exists.go: -------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/aks-operator/pkg/aks/services" 7 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 8 | ) 9 | 10 | func ExistsResourceGroup(ctx context.Context, groupsClient services.ResourceGroupsClientInterface, resourceGroup string) (bool, error) { 11 | resp, err := groupsClient.CheckExistence(ctx, resourceGroup, nil) 12 | if err != nil { 13 | return false, err 14 | } 15 | return resp.Success, err 16 | } 17 | 18 | // ExistsCluster Check if AKS managed Kubernetes cluster exist 19 | func ExistsCluster(ctx context.Context, clusterClient services.ManagedClustersClientInterface, spec *aksv1.AKSClusterConfigSpec) (bool, error) { 20 | resp, err := clusterClient.Get(ctx, spec.ResourceGroup, spec.ClusterName, nil) 21 | if err != nil { 22 | return false, err 23 | } 24 | 25 | return String(resp.Name) == spec.ClusterName, err 26 | } 27 | -------------------------------------------------------------------------------- /pkg/aks/get.go: -------------------------------------------------------------------------------- 1 | package aks 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" 7 | "github.com/rancher/aks-operator/pkg/aks/services" 8 | ) 9 | 10 | func GetClusterAccessProfile(ctx context.Context, clusterClient services.ManagedClustersClientInterface, resourceGroupName string, resourceName string, roleName string) (armcontainerservice.ManagedClustersClientGetAccessProfileResponse, error) { 11 | return 
clusterClient.GetAccessProfile(ctx, resourceGroupName, resourceName, roleName, nil) 12 | } 13 | -------------------------------------------------------------------------------- /pkg/aks/services/agentpools.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/Azure/azure-sdk-for-go/sdk/azcore" 7 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" 8 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" 9 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" 10 | "github.com/Azure/azure-sdk-for-go/sdk/azidentity" 11 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" 12 | ) 13 | 14 | type AgentPoolsClientInterface interface { 15 | BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, agentPoolName string, parameters armcontainerservice.AgentPool) (*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse], error) 16 | BeginDelete(ctx context.Context, resourceGroupName string, clusterName string, agentPoolName string) (*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteResponse], error) 17 | } 18 | 19 | type agentPoolClient struct { 20 | agentPoolClient *armcontainerservice.AgentPoolsClient 21 | } 22 | 23 | func NewAgentPoolClient(subscriptionID string, credential *azidentity.ClientSecretCredential, cloud cloud.Configuration) (*agentPoolClient, error) { 24 | options := arm.ClientOptions{ 25 | ClientOptions: azcore.ClientOptions{ 26 | Cloud: cloud, 27 | }, 28 | } 29 | clientFactory, err := armcontainerservice.NewClientFactory(subscriptionID, credential, &options) 30 | if err != nil { 31 | return nil, err 32 | } 33 | 34 | return &agentPoolClient{ 35 | agentPoolClient: clientFactory.NewAgentPoolsClient(), 36 | }, nil 37 | } 38 | 39 | func (cl *agentPoolClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, clusterName string, agentPoolName 
string, parameters armcontainerservice.AgentPool) (*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse], error) { 40 | return cl.agentPoolClient.BeginCreateOrUpdate(ctx, resourceGroupName, clusterName, agentPoolName, parameters, nil) 41 | } 42 | 43 | func (cl *agentPoolClient) BeginDelete(ctx context.Context, resourceGroupName string, clusterName string, agentPoolName string) (*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteResponse], error) { 44 | return cl.agentPoolClient.BeginDelete(ctx, resourceGroupName, clusterName, agentPoolName, nil) 45 | } 46 | -------------------------------------------------------------------------------- /pkg/aks/services/groups.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/Azure/azure-sdk-for-go/sdk/azcore" 7 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" 8 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" 9 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" 10 | "github.com/Azure/azure-sdk-for-go/sdk/azidentity" 11 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" 12 | ) 13 | 14 | type ResourceGroupsClientInterface interface { 15 | CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceGroup armresources.ResourceGroup, options *armresources.ResourceGroupsClientCreateOrUpdateOptions) (armresources.ResourceGroupsClientCreateOrUpdateResponse, error) 16 | BeginDelete(ctx context.Context, resourceGroupName string, options *armresources.ResourceGroupsClientBeginDeleteOptions) (*runtime.Poller[armresources.ResourceGroupsClientDeleteResponse], error) 17 | CheckExistence(ctx context.Context, resourceGroupName string, options *armresources.ResourceGroupsClientCheckExistenceOptions) (armresources.ResourceGroupsClientCheckExistenceResponse, error) 18 | } 19 | 20 | type resourceGroupsClient struct { 21 | armresourcesGroupsClient 
*armresources.ResourceGroupsClient 22 | } 23 | 24 | func NewResourceGroupsClient(subscriptionID string, credential *azidentity.ClientSecretCredential, cloud cloud.Configuration) (*resourceGroupsClient, error) { 25 | options := arm.ClientOptions{ 26 | ClientOptions: azcore.ClientOptions{ 27 | Cloud: cloud, 28 | }, 29 | } 30 | clientFactory, err := armresources.NewClientFactory(subscriptionID, credential, &options) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return &resourceGroupsClient{ 36 | armresourcesGroupsClient: clientFactory.NewResourceGroupsClient(), 37 | }, nil 38 | } 39 | 40 | func (cl *resourceGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceGroup armresources.ResourceGroup, options *armresources.ResourceGroupsClientCreateOrUpdateOptions) (armresources.ResourceGroupsClientCreateOrUpdateResponse, error) { 41 | return cl.armresourcesGroupsClient.CreateOrUpdate(ctx, resourceGroupName, resourceGroup, options) 42 | } 43 | 44 | func (cl *resourceGroupsClient) BeginDelete(ctx context.Context, resourceGroupName string, options *armresources.ResourceGroupsClientBeginDeleteOptions) (*runtime.Poller[armresources.ResourceGroupsClientDeleteResponse], error) { 45 | return cl.armresourcesGroupsClient.BeginDelete(ctx, resourceGroupName, options) 46 | } 47 | 48 | func (cl *resourceGroupsClient) CheckExistence(ctx context.Context, resourceGroupName string, options *armresources.ResourceGroupsClientCheckExistenceOptions) (armresources.ResourceGroupsClientCheckExistenceResponse, error) { 49 | return cl.armresourcesGroupsClient.CheckExistence(ctx, resourceGroupName, options) 50 | } 51 | -------------------------------------------------------------------------------- /pkg/aks/services/managedclusters.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/Azure/azure-sdk-for-go/sdk/azcore" 7 | 
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" 8 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" 9 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" 10 | "github.com/Azure/azure-sdk-for-go/sdk/azidentity" 11 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" 12 | ) 13 | 14 | type Poller[T any] interface { 15 | PollUntilDone(ctx context.Context, options *runtime.PollUntilDoneOptions) (T, error) 16 | } 17 | 18 | type ManagedClustersClientInterface interface { 19 | BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters armcontainerservice.ManagedCluster, options *armcontainerservice.ManagedClustersClientBeginCreateOrUpdateOptions) (Poller[armcontainerservice.ManagedClustersClientCreateOrUpdateResponse], error) 20 | Get(ctx context.Context, resourceGroupName string, resourceName string, options *armcontainerservice.ManagedClustersClientGetOptions) (armcontainerservice.ManagedClustersClientGetResponse, error) 21 | BeginDelete(ctx context.Context, resourceGroupName string, resourceName string, options *armcontainerservice.ManagedClustersClientBeginDeleteOptions) (Poller[armcontainerservice.ManagedClustersClientDeleteResponse], error) 22 | GetAccessProfile(ctx context.Context, resourceGroupName string, resourceName string, roleName string, options *armcontainerservice.ManagedClustersClientGetAccessProfileOptions) (armcontainerservice.ManagedClustersClientGetAccessProfileResponse, error) 23 | BeginUpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters armcontainerservice.TagsObject, options *armcontainerservice.ManagedClustersClientBeginUpdateTagsOptions) (Poller[armcontainerservice.ManagedClustersClientUpdateTagsResponse], error) 24 | } 25 | 26 | type managedClustersClient struct { 27 | armManagedClustersClient *armcontainerservice.ManagedClustersClient 28 | } 29 | 30 | func NewManagedClustersClient(subscriptionID string, 
credential *azidentity.ClientSecretCredential, cloud cloud.Configuration) (*managedClustersClient, error) { 31 | options := arm.ClientOptions{ 32 | ClientOptions: azcore.ClientOptions{ 33 | Cloud: cloud, 34 | }, 35 | } 36 | clientFactory, err := armcontainerservice.NewClientFactory(subscriptionID, credential, &options) 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | return &managedClustersClient{ 42 | armManagedClustersClient: clientFactory.NewManagedClustersClient(), 43 | }, nil 44 | } 45 | 46 | func (cl *managedClustersClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, resourceName string, parameters armcontainerservice.ManagedCluster, options *armcontainerservice.ManagedClustersClientBeginCreateOrUpdateOptions) (Poller[armcontainerservice.ManagedClustersClientCreateOrUpdateResponse], error) { 47 | return cl.armManagedClustersClient.BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, parameters, options) 48 | } 49 | 50 | func (cl *managedClustersClient) Get(ctx context.Context, resourceGroupName string, resourceName string, options *armcontainerservice.ManagedClustersClientGetOptions) (armcontainerservice.ManagedClustersClientGetResponse, error) { 51 | return cl.armManagedClustersClient.Get(ctx, resourceGroupName, resourceName, options) 52 | } 53 | 54 | func (cl *managedClustersClient) BeginDelete(ctx context.Context, resourceGroupName string, resourceName string, options *armcontainerservice.ManagedClustersClientBeginDeleteOptions) (Poller[armcontainerservice.ManagedClustersClientDeleteResponse], error) { 55 | return cl.armManagedClustersClient.BeginDelete(ctx, resourceGroupName, resourceName, options) 56 | } 57 | 58 | func (cl *managedClustersClient) GetAccessProfile(ctx context.Context, resourceGroupName string, resourceName string, roleName string, options *armcontainerservice.ManagedClustersClientGetAccessProfileOptions) (armcontainerservice.ManagedClustersClientGetAccessProfileResponse, error) { 59 | return 
cl.armManagedClustersClient.GetAccessProfile(ctx, resourceGroupName, resourceName, roleName, options) 60 | } 61 | 62 | func (cl *managedClustersClient) BeginUpdateTags(ctx context.Context, resourceGroupName string, resourceName string, parameters armcontainerservice.TagsObject, options *armcontainerservice.ManagedClustersClientBeginUpdateTagsOptions) (Poller[armcontainerservice.ManagedClustersClientUpdateTagsResponse], error) { 63 | return cl.armManagedClustersClient.BeginUpdateTags(ctx, resourceGroupName, resourceName, parameters, options) 64 | } 65 | -------------------------------------------------------------------------------- /pkg/aks/services/mock_services/agentpools_mock.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ../agentpools.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -destination agentpools_mock.go -package mock_services -source ../agentpools.go AgentPoolsClientInterface 7 | // 8 | 9 | // Package mock_services is a generated GoMock package. 10 | package mock_services 11 | 12 | import ( 13 | context "context" 14 | reflect "reflect" 15 | 16 | runtime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" 17 | armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" 18 | gomock "go.uber.org/mock/gomock" 19 | ) 20 | 21 | // MockAgentPoolsClientInterface is a mock of AgentPoolsClientInterface interface. 22 | type MockAgentPoolsClientInterface struct { 23 | ctrl *gomock.Controller 24 | recorder *MockAgentPoolsClientInterfaceMockRecorder 25 | } 26 | 27 | // MockAgentPoolsClientInterfaceMockRecorder is the mock recorder for MockAgentPoolsClientInterface. 28 | type MockAgentPoolsClientInterfaceMockRecorder struct { 29 | mock *MockAgentPoolsClientInterface 30 | } 31 | 32 | // NewMockAgentPoolsClientInterface creates a new mock instance. 
33 | func NewMockAgentPoolsClientInterface(ctrl *gomock.Controller) *MockAgentPoolsClientInterface { 34 | mock := &MockAgentPoolsClientInterface{ctrl: ctrl} 35 | mock.recorder = &MockAgentPoolsClientInterfaceMockRecorder{mock} 36 | return mock 37 | } 38 | 39 | // EXPECT returns an object that allows the caller to indicate expected use. 40 | func (m *MockAgentPoolsClientInterface) EXPECT() *MockAgentPoolsClientInterfaceMockRecorder { 41 | return m.recorder 42 | } 43 | 44 | // BeginCreateOrUpdate mocks base method. 45 | func (m *MockAgentPoolsClientInterface) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, clusterName, agentPoolName string, parameters armcontainerservice.AgentPool) (*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse], error) { 46 | m.ctrl.T.Helper() 47 | ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, clusterName, agentPoolName, parameters) 48 | ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientCreateOrUpdateResponse]) 49 | ret1, _ := ret[1].(error) 50 | return ret0, ret1 51 | } 52 | 53 | // BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. 54 | func (mr *MockAgentPoolsClientInterfaceMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, clusterName, agentPoolName, parameters any) *gomock.Call { 55 | mr.mock.ctrl.T.Helper() 56 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockAgentPoolsClientInterface)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, clusterName, agentPoolName, parameters) 57 | } 58 | 59 | // BeginDelete mocks base method. 
60 | func (m *MockAgentPoolsClientInterface) BeginDelete(ctx context.Context, resourceGroupName, clusterName, agentPoolName string) (*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteResponse], error) { 61 | m.ctrl.T.Helper() 62 | ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, clusterName, agentPoolName) 63 | ret0, _ := ret[0].(*runtime.Poller[armcontainerservice.AgentPoolsClientDeleteResponse]) 64 | ret1, _ := ret[1].(error) 65 | return ret0, ret1 66 | } 67 | 68 | // BeginDelete indicates an expected call of BeginDelete. 69 | func (mr *MockAgentPoolsClientInterfaceMockRecorder) BeginDelete(ctx, resourceGroupName, clusterName, agentPoolName any) *gomock.Call { 70 | mr.mock.ctrl.T.Helper() 71 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockAgentPoolsClientInterface)(nil).BeginDelete), ctx, resourceGroupName, clusterName, agentPoolName) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/aks/services/mock_services/doc.go: -------------------------------------------------------------------------------- 1 | package mock_services 2 | 3 | // Run go generate to regenerate this mock. 
4 | // 5 | //go:generate ../../../../bin/mockgen -destination agentpools_mock.go -package mock_services -source ../agentpools.go AgentPoolsClientInterface 6 | //go:generate ../../../../bin/mockgen -destination groups_mock.go -package mock_services -source ../groups.go ResourceGroupsClientInterface 7 | //go:generate ../../../../bin/mockgen -destination managedclusters_mock.go -package mock_services -source ../managedclusters.go ManagedClustersClientInterface 8 | //go:generate ../../../../bin/mockgen -destination workplaces_mock.go -package mock_services -source ../workplaces.go WorkplacesClientInterface 9 | -------------------------------------------------------------------------------- /pkg/aks/services/mock_services/groups_mock.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ../groups.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -destination groups_mock.go -package mock_services -source ../groups.go ResourceGroupsClientInterface 7 | // 8 | 9 | // Package mock_services is a generated GoMock package. 10 | package mock_services 11 | 12 | import ( 13 | context "context" 14 | reflect "reflect" 15 | 16 | runtime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" 17 | armresources "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" 18 | gomock "go.uber.org/mock/gomock" 19 | ) 20 | 21 | // MockResourceGroupsClientInterface is a mock of ResourceGroupsClientInterface interface. 22 | type MockResourceGroupsClientInterface struct { 23 | ctrl *gomock.Controller 24 | recorder *MockResourceGroupsClientInterfaceMockRecorder 25 | } 26 | 27 | // MockResourceGroupsClientInterfaceMockRecorder is the mock recorder for MockResourceGroupsClientInterface. 
28 | type MockResourceGroupsClientInterfaceMockRecorder struct { 29 | mock *MockResourceGroupsClientInterface 30 | } 31 | 32 | // NewMockResourceGroupsClientInterface creates a new mock instance. 33 | func NewMockResourceGroupsClientInterface(ctrl *gomock.Controller) *MockResourceGroupsClientInterface { 34 | mock := &MockResourceGroupsClientInterface{ctrl: ctrl} 35 | mock.recorder = &MockResourceGroupsClientInterfaceMockRecorder{mock} 36 | return mock 37 | } 38 | 39 | // EXPECT returns an object that allows the caller to indicate expected use. 40 | func (m *MockResourceGroupsClientInterface) EXPECT() *MockResourceGroupsClientInterfaceMockRecorder { 41 | return m.recorder 42 | } 43 | 44 | // BeginDelete mocks base method. 45 | func (m *MockResourceGroupsClientInterface) BeginDelete(ctx context.Context, resourceGroupName string, options *armresources.ResourceGroupsClientBeginDeleteOptions) (*runtime.Poller[armresources.ResourceGroupsClientDeleteResponse], error) { 46 | m.ctrl.T.Helper() 47 | ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, options) 48 | ret0, _ := ret[0].(*runtime.Poller[armresources.ResourceGroupsClientDeleteResponse]) 49 | ret1, _ := ret[1].(error) 50 | return ret0, ret1 51 | } 52 | 53 | // BeginDelete indicates an expected call of BeginDelete. 54 | func (mr *MockResourceGroupsClientInterfaceMockRecorder) BeginDelete(ctx, resourceGroupName, options any) *gomock.Call { 55 | mr.mock.ctrl.T.Helper() 56 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockResourceGroupsClientInterface)(nil).BeginDelete), ctx, resourceGroupName, options) 57 | } 58 | 59 | // CheckExistence mocks base method. 
60 | func (m *MockResourceGroupsClientInterface) CheckExistence(ctx context.Context, resourceGroupName string, options *armresources.ResourceGroupsClientCheckExistenceOptions) (armresources.ResourceGroupsClientCheckExistenceResponse, error) { 61 | m.ctrl.T.Helper() 62 | ret := m.ctrl.Call(m, "CheckExistence", ctx, resourceGroupName, options) 63 | ret0, _ := ret[0].(armresources.ResourceGroupsClientCheckExistenceResponse) 64 | ret1, _ := ret[1].(error) 65 | return ret0, ret1 66 | } 67 | 68 | // CheckExistence indicates an expected call of CheckExistence. 69 | func (mr *MockResourceGroupsClientInterfaceMockRecorder) CheckExistence(ctx, resourceGroupName, options any) *gomock.Call { 70 | mr.mock.ctrl.T.Helper() 71 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CheckExistence", reflect.TypeOf((*MockResourceGroupsClientInterface)(nil).CheckExistence), ctx, resourceGroupName, options) 72 | } 73 | 74 | // CreateOrUpdate mocks base method. 75 | func (m *MockResourceGroupsClientInterface) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceGroup armresources.ResourceGroup, options *armresources.ResourceGroupsClientCreateOrUpdateOptions) (armresources.ResourceGroupsClientCreateOrUpdateResponse, error) { 76 | m.ctrl.T.Helper() 77 | ret := m.ctrl.Call(m, "CreateOrUpdate", ctx, resourceGroupName, resourceGroup, options) 78 | ret0, _ := ret[0].(armresources.ResourceGroupsClientCreateOrUpdateResponse) 79 | ret1, _ := ret[1].(error) 80 | return ret0, ret1 81 | } 82 | 83 | // CreateOrUpdate indicates an expected call of CreateOrUpdate. 
84 | func (mr *MockResourceGroupsClientInterfaceMockRecorder) CreateOrUpdate(ctx, resourceGroupName, resourceGroup, options any) *gomock.Call { 85 | mr.mock.ctrl.T.Helper() 86 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateOrUpdate", reflect.TypeOf((*MockResourceGroupsClientInterface)(nil).CreateOrUpdate), ctx, resourceGroupName, resourceGroup, options) 87 | } 88 | -------------------------------------------------------------------------------- /pkg/aks/services/mock_services/managedclusters_mock.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ../managedclusters.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -destination managedclusters_mock.go -package mock_services -source ../managedclusters.go ManagedClustersClientInterface 7 | // 8 | 9 | // Package mock_services is a generated GoMock package. 10 | package mock_services 11 | 12 | import ( 13 | context "context" 14 | reflect "reflect" 15 | 16 | runtime "github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime" 17 | armcontainerservice "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/containerservice/armcontainerservice/v5" 18 | services "github.com/rancher/aks-operator/pkg/aks/services" 19 | gomock "go.uber.org/mock/gomock" 20 | ) 21 | 22 | // MockPoller is a mock of Poller interface. 23 | type MockPoller[T any] struct { 24 | ctrl *gomock.Controller 25 | recorder *MockPollerMockRecorder[T] 26 | } 27 | 28 | // MockPollerMockRecorder is the mock recorder for MockPoller. 29 | type MockPollerMockRecorder[T any] struct { 30 | mock *MockPoller[T] 31 | } 32 | 33 | // NewMockPoller creates a new mock instance. 34 | func NewMockPoller[T any](ctrl *gomock.Controller) *MockPoller[T] { 35 | mock := &MockPoller[T]{ctrl: ctrl} 36 | mock.recorder = &MockPollerMockRecorder[T]{mock} 37 | return mock 38 | } 39 | 40 | // EXPECT returns an object that allows the caller to indicate expected use. 
41 | func (m *MockPoller[T]) EXPECT() *MockPollerMockRecorder[T] { 42 | return m.recorder 43 | } 44 | 45 | // PollUntilDone mocks base method. 46 | func (m *MockPoller[T]) PollUntilDone(ctx context.Context, options *runtime.PollUntilDoneOptions) (T, error) { 47 | m.ctrl.T.Helper() 48 | ret := m.ctrl.Call(m, "PollUntilDone", ctx, options) 49 | ret0, _ := ret[0].(T) 50 | ret1, _ := ret[1].(error) 51 | return ret0, ret1 52 | } 53 | 54 | // PollUntilDone indicates an expected call of PollUntilDone. 55 | func (mr *MockPollerMockRecorder[T]) PollUntilDone(ctx, options any) *gomock.Call { 56 | mr.mock.ctrl.T.Helper() 57 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PollUntilDone", reflect.TypeOf((*MockPoller[T])(nil).PollUntilDone), ctx, options) 58 | } 59 | 60 | // MockManagedClustersClientInterface is a mock of ManagedClustersClientInterface interface. 61 | type MockManagedClustersClientInterface struct { 62 | ctrl *gomock.Controller 63 | recorder *MockManagedClustersClientInterfaceMockRecorder 64 | } 65 | 66 | // MockManagedClustersClientInterfaceMockRecorder is the mock recorder for MockManagedClustersClientInterface. 67 | type MockManagedClustersClientInterfaceMockRecorder struct { 68 | mock *MockManagedClustersClientInterface 69 | } 70 | 71 | // NewMockManagedClustersClientInterface creates a new mock instance. 72 | func NewMockManagedClustersClientInterface(ctrl *gomock.Controller) *MockManagedClustersClientInterface { 73 | mock := &MockManagedClustersClientInterface{ctrl: ctrl} 74 | mock.recorder = &MockManagedClustersClientInterfaceMockRecorder{mock} 75 | return mock 76 | } 77 | 78 | // EXPECT returns an object that allows the caller to indicate expected use. 79 | func (m *MockManagedClustersClientInterface) EXPECT() *MockManagedClustersClientInterfaceMockRecorder { 80 | return m.recorder 81 | } 82 | 83 | // BeginCreateOrUpdate mocks base method. 
84 | func (m *MockManagedClustersClientInterface) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, resourceName string, parameters armcontainerservice.ManagedCluster, options *armcontainerservice.ManagedClustersClientBeginCreateOrUpdateOptions) (services.Poller[armcontainerservice.ManagedClustersClientCreateOrUpdateResponse], error) { 85 | m.ctrl.T.Helper() 86 | ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, resourceName, parameters, options) 87 | ret0, _ := ret[0].(services.Poller[armcontainerservice.ManagedClustersClientCreateOrUpdateResponse]) 88 | ret1, _ := ret[1].(error) 89 | return ret0, ret1 90 | } 91 | 92 | // BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. 93 | func (mr *MockManagedClustersClientInterfaceMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, resourceName, parameters, options any) *gomock.Call { 94 | mr.mock.ctrl.T.Helper() 95 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockManagedClustersClientInterface)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, resourceName, parameters, options) 96 | } 97 | 98 | // BeginDelete mocks base method. 99 | func (m *MockManagedClustersClientInterface) BeginDelete(ctx context.Context, resourceGroupName, resourceName string, options *armcontainerservice.ManagedClustersClientBeginDeleteOptions) (services.Poller[armcontainerservice.ManagedClustersClientDeleteResponse], error) { 100 | m.ctrl.T.Helper() 101 | ret := m.ctrl.Call(m, "BeginDelete", ctx, resourceGroupName, resourceName, options) 102 | ret0, _ := ret[0].(services.Poller[armcontainerservice.ManagedClustersClientDeleteResponse]) 103 | ret1, _ := ret[1].(error) 104 | return ret0, ret1 105 | } 106 | 107 | // BeginDelete indicates an expected call of BeginDelete. 
108 | func (mr *MockManagedClustersClientInterfaceMockRecorder) BeginDelete(ctx, resourceGroupName, resourceName, options any) *gomock.Call { 109 | mr.mock.ctrl.T.Helper() 110 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginDelete", reflect.TypeOf((*MockManagedClustersClientInterface)(nil).BeginDelete), ctx, resourceGroupName, resourceName, options) 111 | } 112 | 113 | // BeginUpdateTags mocks base method. 114 | func (m *MockManagedClustersClientInterface) BeginUpdateTags(ctx context.Context, resourceGroupName, resourceName string, parameters armcontainerservice.TagsObject, options *armcontainerservice.ManagedClustersClientBeginUpdateTagsOptions) (services.Poller[armcontainerservice.ManagedClustersClientUpdateTagsResponse], error) { 115 | m.ctrl.T.Helper() 116 | ret := m.ctrl.Call(m, "BeginUpdateTags", ctx, resourceGroupName, resourceName, parameters, options) 117 | ret0, _ := ret[0].(services.Poller[armcontainerservice.ManagedClustersClientUpdateTagsResponse]) 118 | ret1, _ := ret[1].(error) 119 | return ret0, ret1 120 | } 121 | 122 | // BeginUpdateTags indicates an expected call of BeginUpdateTags. 123 | func (mr *MockManagedClustersClientInterfaceMockRecorder) BeginUpdateTags(ctx, resourceGroupName, resourceName, parameters, options any) *gomock.Call { 124 | mr.mock.ctrl.T.Helper() 125 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginUpdateTags", reflect.TypeOf((*MockManagedClustersClientInterface)(nil).BeginUpdateTags), ctx, resourceGroupName, resourceName, parameters, options) 126 | } 127 | 128 | // Get mocks base method. 
129 | func (m *MockManagedClustersClientInterface) Get(ctx context.Context, resourceGroupName, resourceName string, options *armcontainerservice.ManagedClustersClientGetOptions) (armcontainerservice.ManagedClustersClientGetResponse, error) { 130 | m.ctrl.T.Helper() 131 | ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, resourceName, options) 132 | ret0, _ := ret[0].(armcontainerservice.ManagedClustersClientGetResponse) 133 | ret1, _ := ret[1].(error) 134 | return ret0, ret1 135 | } 136 | 137 | // Get indicates an expected call of Get. 138 | func (mr *MockManagedClustersClientInterfaceMockRecorder) Get(ctx, resourceGroupName, resourceName, options any) *gomock.Call { 139 | mr.mock.ctrl.T.Helper() 140 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockManagedClustersClientInterface)(nil).Get), ctx, resourceGroupName, resourceName, options) 141 | } 142 | 143 | // GetAccessProfile mocks base method. 144 | func (m *MockManagedClustersClientInterface) GetAccessProfile(ctx context.Context, resourceGroupName, resourceName, roleName string, options *armcontainerservice.ManagedClustersClientGetAccessProfileOptions) (armcontainerservice.ManagedClustersClientGetAccessProfileResponse, error) { 145 | m.ctrl.T.Helper() 146 | ret := m.ctrl.Call(m, "GetAccessProfile", ctx, resourceGroupName, resourceName, roleName, options) 147 | ret0, _ := ret[0].(armcontainerservice.ManagedClustersClientGetAccessProfileResponse) 148 | ret1, _ := ret[1].(error) 149 | return ret0, ret1 150 | } 151 | 152 | // GetAccessProfile indicates an expected call of GetAccessProfile. 
153 | func (mr *MockManagedClustersClientInterfaceMockRecorder) GetAccessProfile(ctx, resourceGroupName, resourceName, roleName, options any) *gomock.Call { 154 | mr.mock.ctrl.T.Helper() 155 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAccessProfile", reflect.TypeOf((*MockManagedClustersClientInterface)(nil).GetAccessProfile), ctx, resourceGroupName, resourceName, roleName, options) 156 | } 157 | -------------------------------------------------------------------------------- /pkg/aks/services/mock_services/workplaces_mock.go: -------------------------------------------------------------------------------- 1 | // Code generated by MockGen. DO NOT EDIT. 2 | // Source: ../workplaces.go 3 | // 4 | // Generated by this command: 5 | // 6 | // mockgen -destination workplaces_mock.go -package mock_services -source ../workplaces.go WorkplacesClientInterface 7 | // 8 | 9 | // Package mock_services is a generated GoMock package. 10 | package mock_services 11 | 12 | import ( 13 | context "context" 14 | reflect "reflect" 15 | 16 | armoperationalinsights "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights" 17 | services "github.com/rancher/aks-operator/pkg/aks/services" 18 | gomock "go.uber.org/mock/gomock" 19 | ) 20 | 21 | // MockWorkplacesClientInterface is a mock of WorkplacesClientInterface interface. 22 | type MockWorkplacesClientInterface struct { 23 | ctrl *gomock.Controller 24 | recorder *MockWorkplacesClientInterfaceMockRecorder 25 | } 26 | 27 | // MockWorkplacesClientInterfaceMockRecorder is the mock recorder for MockWorkplacesClientInterface. 28 | type MockWorkplacesClientInterfaceMockRecorder struct { 29 | mock *MockWorkplacesClientInterface 30 | } 31 | 32 | // NewMockWorkplacesClientInterface creates a new mock instance. 
33 | func NewMockWorkplacesClientInterface(ctrl *gomock.Controller) *MockWorkplacesClientInterface { 34 | mock := &MockWorkplacesClientInterface{ctrl: ctrl} 35 | mock.recorder = &MockWorkplacesClientInterfaceMockRecorder{mock} 36 | return mock 37 | } 38 | 39 | // EXPECT returns an object that allows the caller to indicate expected use. 40 | func (m *MockWorkplacesClientInterface) EXPECT() *MockWorkplacesClientInterfaceMockRecorder { 41 | return m.recorder 42 | } 43 | 44 | // BeginCreateOrUpdate mocks base method. 45 | func (m *MockWorkplacesClientInterface) BeginCreateOrUpdate(ctx context.Context, resourceGroupName, workspaceName string, parameters armoperationalinsights.Workspace, options *armoperationalinsights.WorkspacesClientBeginCreateOrUpdateOptions) (services.Poller[armoperationalinsights.WorkspacesClientCreateOrUpdateResponse], error) { 46 | m.ctrl.T.Helper() 47 | ret := m.ctrl.Call(m, "BeginCreateOrUpdate", ctx, resourceGroupName, workspaceName, parameters, options) 48 | ret0, _ := ret[0].(services.Poller[armoperationalinsights.WorkspacesClientCreateOrUpdateResponse]) 49 | ret1, _ := ret[1].(error) 50 | return ret0, ret1 51 | } 52 | 53 | // BeginCreateOrUpdate indicates an expected call of BeginCreateOrUpdate. 54 | func (mr *MockWorkplacesClientInterfaceMockRecorder) BeginCreateOrUpdate(ctx, resourceGroupName, workspaceName, parameters, options any) *gomock.Call { 55 | mr.mock.ctrl.T.Helper() 56 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BeginCreateOrUpdate", reflect.TypeOf((*MockWorkplacesClientInterface)(nil).BeginCreateOrUpdate), ctx, resourceGroupName, workspaceName, parameters, options) 57 | } 58 | 59 | // Get mocks base method. 
60 | func (m *MockWorkplacesClientInterface) Get(ctx context.Context, resourceGroupName, workspaceName string, options *armoperationalinsights.WorkspacesClientGetOptions) (armoperationalinsights.WorkspacesClientGetResponse, error) { 61 | m.ctrl.T.Helper() 62 | ret := m.ctrl.Call(m, "Get", ctx, resourceGroupName, workspaceName, options) 63 | ret0, _ := ret[0].(armoperationalinsights.WorkspacesClientGetResponse) 64 | ret1, _ := ret[1].(error) 65 | return ret0, ret1 66 | } 67 | 68 | // Get indicates an expected call of Get. 69 | func (mr *MockWorkplacesClientInterfaceMockRecorder) Get(ctx, resourceGroupName, workspaceName, options any) *gomock.Call { 70 | mr.mock.ctrl.T.Helper() 71 | return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockWorkplacesClientInterface)(nil).Get), ctx, resourceGroupName, workspaceName, options) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/aks/services/workplaces.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/Azure/azure-sdk-for-go/sdk/azcore" 7 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm" 8 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/cloud" 9 | "github.com/Azure/azure-sdk-for-go/sdk/azidentity" 10 | "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/operationalinsights/armoperationalinsights" 11 | ) 12 | 13 | type WorkplacesClientInterface interface { 14 | BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters armoperationalinsights.Workspace, options *armoperationalinsights.WorkspacesClientBeginCreateOrUpdateOptions) (Poller[armoperationalinsights.WorkspacesClientCreateOrUpdateResponse], error) 15 | Get(ctx context.Context, resourceGroupName string, workspaceName string, options *armoperationalinsights.WorkspacesClientGetOptions) (armoperationalinsights.WorkspacesClientGetResponse, error) 16 | 
}

// workplacesClient is the concrete WorkplacesClientInterface backed by the
// Azure SDK operational-insights WorkspacesClient. ("Workplaces" follows the
// established interface naming, although the underlying Azure resource is a
// Log Analytics "workspace".)
type workplacesClient struct {
	armWorkspacesClient *armoperationalinsights.WorkspacesClient
}

// NewWorkplacesClient returns a Log Analytics workspaces client for the given
// subscription, authenticating with the provided client-secret credential
// against the supplied Azure cloud configuration.
func NewWorkplacesClient(subscriptionID string, credential *azidentity.ClientSecretCredential, cloud cloud.Configuration) (*workplacesClient, error) {
	clientOpts := arm.ClientOptions{
		ClientOptions: azcore.ClientOptions{Cloud: cloud},
	}

	factory, err := armoperationalinsights.NewClientFactory(subscriptionID, credential, &clientOpts)
	if err != nil {
		return nil, err
	}

	return &workplacesClient{armWorkspacesClient: factory.NewWorkspacesClient()}, nil
}

// BeginCreateOrUpdate starts a long-running create-or-update of a Log
// Analytics workspace and returns a poller for the operation.
func (c *workplacesClient) BeginCreateOrUpdate(ctx context.Context, resourceGroupName string, workspaceName string, parameters armoperationalinsights.Workspace, options *armoperationalinsights.WorkspacesClientBeginCreateOrUpdateOptions) (Poller[armoperationalinsights.WorkspacesClientCreateOrUpdateResponse], error) {
	return c.armWorkspacesClient.BeginCreateOrUpdate(ctx, resourceGroupName, workspaceName, parameters, options)
}

// Get fetches a Log Analytics workspace by resource group and workspace name.
func (c *workplacesClient) Get(ctx context.Context, resourceGroupName string, workspaceName string, options *armoperationalinsights.WorkspacesClientGetOptions) (armoperationalinsights.WorkspacesClientGetResponse, error) {
	return c.armWorkspacesClient.Get(ctx, resourceGroupName, workspaceName, options)
}
"github.com/onsi/gomega"
	// NOTE(review): golang.org/x/net/context is a deprecated alias of the
	// standard library "context" package; consider importing "context" instead.
	"golang.org/x/net/context"
)

// ctx is the background context shared by the specs in this package's suite.
var (
	ctx = context.Background()
)

// TestAPIs registers Gomega's failure handler with Ginkgo and hands control
// of the `go test` run to Ginkgo, which executes the package's specs.
func TestAPIs(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "AKS Suite")
}
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// AKSClusterConfig represents the configuration of an AKS cluster managed by
// the operator.
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +kubebuilder:printcolumn:name="ClusterName",type="string",JSONPath=".spec.clusterName"
// +kubebuilder:printcolumn:name="KubernetesVersion",type="string",JSONPath=".spec.kubernetesVersion"
// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase"
// +kubebuilder:printcolumn:name="FailureMessage",type="string",JSONPath=".status.failureMessage"

type AKSClusterConfig struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   AKSClusterConfigSpec   `json:"spec"`
	Status AKSClusterConfigStatus `json:"status"`
}

// AKSClusterConfigSpec is the spec for a AKSClusterConfig resource
type AKSClusterConfigSpec struct {
	// Imported indicates that the cluster was imported.
	// +optional
	// +kubebuilder:default=false
	Imported bool `json:"imported" norman:"noupdate"`
	// ResourceLocation specifies the region to create the private endpoint.
	ResourceLocation string `json:"resourceLocation" norman:"noupdate"`
	// ResourceGroup is the name of the Azure resource group for this AKS Cluster.
	// Immutable.
	ResourceGroup string `json:"resourceGroup" norman:"noupdate"`
	// ClusterName allows you to specify the name of the AKS cluster in Azure.
	ClusterName string `json:"clusterName" norman:"noupdate"`
	// AzureCredentialSecret is the name of the secret containing the Azure credentials.
	AzureCredentialSecret string `json:"azureCredentialSecret"`
	// BaseURL is the Azure Resource Manager endpoint.
	// +optional
	BaseURL *string `json:"baseUrl" norman:"pointer"`
	// AuthBaseURL is the Azure Active Directory endpoint.
	// +optional
	AuthBaseURL *string `json:"authBaseUrl" norman:"pointer"`
	// NetworkPlugin used for building Kubernetes network.
	// Allowed values are "azure", "kubenet".
	// Immutable.
	// +kubebuilder:validation:Enum=azure;kubenet
	// +optional
	NetworkPlugin *string `json:"networkPlugin" norman:"pointer"`
	// VirtualNetworkResourceGroup is the name of the Azure resource group for the VNet and Subnet.
	// +optional
	VirtualNetworkResourceGroup *string `json:"virtualNetworkResourceGroup" norman:"pointer"`
	// VirtualNetwork describes the vnet for the AKS cluster. Will be created if it does not exist.
	// +optional
	VirtualNetwork *string `json:"virtualNetwork" norman:"pointer"`
	// Subnet describes a subnet for an AKS cluster.
	Subnet *string `json:"subnet" norman:"pointer"`
	// NetworkDNSServiceIP is an IP address assigned to the Kubernetes DNS service.
	// It must be within the Kubernetes service address range specified in serviceCidr.
	// Immutable.
	// +optional
	NetworkDNSServiceIP *string `json:"dnsServiceIp" norman:"pointer"`
	// NetworkServiceCIDR is the network service cidr.
	NetworkServiceCIDR *string `json:"serviceCidr" norman:"pointer"`
	// NetworkDockerBridgeCIDR is the network docker bridge cidr.
	// Setting the dockerBridgeCidr field is no longer supported,
	// see https://github.com/Azure/AKS/issues/3534
	NetworkDockerBridgeCIDR *string `json:"dockerBridgeCidr" norman:"pointer"`
	// NetworkPodCIDR is the network pod cidr.
	NetworkPodCIDR *string `json:"podCidr" norman:"pointer"`
	// NodeResourceGroup is the name of the resource group
	// containing cluster IaaS resources.
	// +optional
	NodeResourceGroup *string `json:"nodeResourceGroup,omitempty" norman:"pointer"`
	// OutboundType is the outbound configuration used by Nodes.
	// Immutable.
	// +kubebuilder:validation:Enum=loadBalancer;managedNATGateway;userAssignedNATGateway;userDefinedRouting
	// +optional
	OutboundType *string `json:"outboundType" norman:"pointer"`
	// LoadBalancerSKU is the SKU of the loadBalancer to be provisioned.
	// Immutable.
	// +kubebuilder:validation:Enum=Basic;Standard
	// +optional
	LoadBalancerSKU *string `json:"loadBalancerSku" norman:"pointer"`
	// NetworkPolicy used for building Kubernetes network.
	// Allowed values are "azure", "calico".
	// Immutable.
	// +kubebuilder:validation:Enum=azure;calico
	// +optional
	NetworkPolicy *string `json:"networkPolicy" norman:"pointer"`
	// LinuxAdminUsername is a string literal containing a linux admin username.
	// +optional
	LinuxAdminUsername *string `json:"linuxAdminUsername,omitempty" norman:"pointer"`
	// LinuxSSHPublicKey is a string literal containing a ssh public key.
	// +optional
	LinuxSSHPublicKey *string `json:"sshPublicKey,omitempty" norman:"pointer"`
	// DNSPrefix is the DNS prefix to use with hosted Kubernetes API server FQDN.
	DNSPrefix *string `json:"dnsPrefix,omitempty" norman:"pointer"`
	// KubernetesVersion defines the desired Kubernetes version.
	// +kubebuilder:validation:MinLength:=2
	KubernetesVersion *string `json:"kubernetesVersion" norman:"pointer"`
	// Tags is an optional set of tags to add to Azure resources managed by the Azure provider, in addition to the
	// ones added by default.
	// +optional
	Tags map[string]string `json:"tags"`
	// NodePools is a list of node pools associated with the AKS cluster.
	NodePools []AKSNodePool `json:"nodePools"`
	// PrivateCluster - Whether to create the cluster as a private cluster or not.
	// +optional
	PrivateCluster *bool `json:"privateCluster"`
	// PrivateDNSZone - Private dns zone mode for private cluster.
	// +kubebuilder:validation:Enum=System;None
	// +optional
	PrivateDNSZone *string `json:"privateDnsZone" norman:"pointer"`
	// AuthorizedIPRanges - Authorized IP Ranges to kubernetes API server.
	// +optional
	AuthorizedIPRanges *[]string `json:"authorizedIpRanges" norman:"pointer"`
	// HTTPApplicationRouting is enabling add-on for the cluster.
	// Immutable.
	// +optional
	HTTPApplicationRouting *bool `json:"httpApplicationRouting"`
	// Monitoring is enabling add-on for the AKS cluster.
	Monitoring *bool `json:"monitoring"`
	// LogAnalyticsWorkspaceGroup is the name of the resource group for the Log Analytics Workspace.
	// +optional
	LogAnalyticsWorkspaceGroup *string `json:"logAnalyticsWorkspaceGroup" norman:"pointer"`
	// LogAnalyticsWorkspaceName is the name of the Log Analytics Workspace.
	// +optional
	LogAnalyticsWorkspaceName *string `json:"logAnalyticsWorkspaceName" norman:"pointer"`
	// ManagedIdentity - Should a managed identity be enabled or not?
	ManagedIdentity *bool `json:"managedIdentity" norman:"pointer"`
	// UserAssignedIdentity - User assigned identity to be used for the cluster.
	UserAssignedIdentity *string `json:"userAssignedIdentity" norman:"pointer"`
}

// AKSClusterConfigStatus is the observed status of an AKSClusterConfig resource.
type AKSClusterConfigStatus struct {
	// Phase is the current phase of the cluster config.
	Phase string `json:"phase"`
	// FailureMessage is the failure message, if any.
	FailureMessage string `json:"failureMessage"`
	// RBACEnabled reports whether RBAC is enabled on the cluster.
	RBACEnabled *bool `json:"rbacEnabled"`
}

// AKSNodePool describes a node pool in an AKS cluster.
type AKSNodePool struct {
	// Name is the name of the node pool.
	Name *string `json:"name,omitempty" norman:"pointer"`
	// Count is the number of nodes in the node pool.
	Count *int32 `json:"count,omitempty"`
	// MaxPods is the maximum number of pods that can run on each node.
	MaxPods *int32 `json:"maxPods,omitempty"`
	// VMSize is the size of the Virtual Machine.
	VMSize string `json:"vmSize,omitempty"`
	// OsDiskSizeGB is the disk size of the OS disk in GB.
	// +kubebuilder:validation:Minimum=0
	OsDiskSizeGB *int32 `json:"osDiskSizeGB,omitempty"`
	// OsDiskType is the type of the OS disk.
	// +kubebuilder:validation:Enum=Standard_LRS;Premium_LRS;StandardSSD_LRS;UltraSSD_LRS;Ephemeral;Managed
	OsDiskType string `json:"osDiskType,omitempty"`
	// Mode is the mode of the node pool.
	// +kubebuilder:validation:Enum=System;User
	Mode string `json:"mode,omitempty"`
	// OsType is the type of the OS.
	OsType string `json:"osType,omitempty"`
	// OrchestratorVersion is the Kubernetes version of the node pool.
	// +kubebuilder:validation:MinLength:=2
	OrchestratorVersion *string `json:"orchestratorVersion,omitempty" norman:"pointer"`
	// AvailabilityZones is the list of availability zones.
	// +optional
	// +kubebuilder:validation:UniqueItems:=true
	AvailabilityZones *[]string `json:"availabilityZones,omitempty" norman:"pointer"`
	// MaxSurge is the maximum number of nodes that can be added to the node pool during an upgrade.
	// +optional
	MaxSurge *string `json:"maxSurge,omitempty"`
	// MaxCount is the maximum number of nodes in the node pool.
	// +kubebuilder:validation:Minimum=0
	MaxCount *int32 `json:"maxCount,omitempty"`
	// MinCount is the minimum number of nodes in the node pool.
	// +kubebuilder:validation:Minimum=0
	MinCount *int32 `json:"minCount,omitempty"`
	// EnableAutoScaling is whether to enable auto scaling or not.
	// +optional
	EnableAutoScaling *bool `json:"enableAutoScaling,omitempty"`
	// VnetSubnetID is the ID of the subnet.
	VnetSubnetID *string `json:"vnetSubnetID,omitempty" norman:"pointer"`
	// NodeLabels is the list of node labels.
	// +optional
	NodeLabels map[string]*string `json:"nodeLabels,omitempty"`
	// NodeTaints is the list of node taints.
	// +kubebuilder:validation:UniqueItems:=true
	// +optional
	NodeTaints *[]string `json:"nodeTaints,omitempty"`
}
18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=aks.cattle.io 21 | package v1 22 | 23 | import ( 24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | ) 26 | 27 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 28 | 29 | // AKSClusterConfigList is a list of AKSClusterConfig resources 30 | type AKSClusterConfigList struct { 31 | metav1.TypeMeta `json:",inline"` 32 | metav1.ListMeta `json:"metadata"` 33 | 34 | Items []AKSClusterConfig `json:"items"` 35 | } 36 | 37 | func NewAKSClusterConfig(namespace, name string, obj AKSClusterConfig) *AKSClusterConfig { 38 | obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("AKSClusterConfig").ToAPIVersionAndKind() 39 | obj.Name = name 40 | obj.Namespace = namespace 41 | return &obj 42 | } 43 | -------------------------------------------------------------------------------- /pkg/apis/aks.cattle.io/v1/zz_generated_register.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=aks.cattle.io 21 | package v1 22 | 23 | import ( 24 | aks "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io" 25 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 26 | "k8s.io/apimachinery/pkg/runtime" 27 | "k8s.io/apimachinery/pkg/runtime/schema" 28 | ) 29 | 30 | var ( 31 | AKSClusterConfigResourceName = "aksclusterconfigs" 32 | ) 33 | 34 | // SchemeGroupVersion is group version used to register these objects 35 | var SchemeGroupVersion = schema.GroupVersion{Group: aks.GroupName, Version: "v1"} 36 | 37 | // Kind takes an unqualified kind and returns back a Group qualified GroupKind 38 | func Kind(kind string) schema.GroupKind { 39 | return SchemeGroupVersion.WithKind(kind).GroupKind() 40 | } 41 | 42 | // Resource takes an unqualified resource and returns a Group qualified GroupResource 43 | func Resource(resource string) schema.GroupResource { 44 | return SchemeGroupVersion.WithResource(resource).GroupResource() 45 | } 46 | 47 | var ( 48 | SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) 49 | AddToScheme = SchemeBuilder.AddToScheme 50 | ) 51 | 52 | // Adds the list of known types to Scheme. 53 | func addKnownTypes(scheme *runtime.Scheme) error { 54 | scheme.AddKnownTypes(SchemeGroupVersion, 55 | &AKSClusterConfig{}, 56 | &AKSClusterConfigList{}, 57 | ) 58 | metav1.AddToGroupVersion(scheme, SchemeGroupVersion) 59 | return nil 60 | } 61 | -------------------------------------------------------------------------------- /pkg/apis/aks.cattle.io/zz_generated_register.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package aks 20 | 21 | const ( 22 | // Package-wide consts from generator "zz_generated_register". 23 | GroupName = "aks.cattle.io" 24 | ) 25 | -------------------------------------------------------------------------------- /pkg/codegen/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | -------------------------------------------------------------------------------- /pkg/codegen/cleanup/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/rancher/wrangler/v3/pkg/cleanup" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func main() { 11 | if err := cleanup.Cleanup("./pkg/apis"); err != nil { 12 | logrus.Fatal(err) 13 | } 14 | if err := os.RemoveAll("./pkg/generated"); err != nil { 15 | logrus.Fatal(err) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /pkg/codegen/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 8 | _ "github.com/rancher/wrangler-api/pkg/generated/controllers/apiextensions.k8s.io" 9 | controllergen "github.com/rancher/wrangler/v3/pkg/controller-gen" 10 | "github.com/rancher/wrangler/v3/pkg/controller-gen/args" 11 | "github.com/rancher/wrangler/v3/pkg/crd" 12 | "github.com/rancher/wrangler/v3/pkg/yaml" 13 | corev1 "k8s.io/api/core/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/runtime/schema" 16 | ) 17 | 18 | func main() { 19 | os.Unsetenv("GOPATH") 20 | 21 | controllergen.Run(args.Options{ 22 | OutputPackage: "github.com/rancher/aks-operator/pkg/generated", 23 | Boilerplate: "pkg/codegen/boilerplate.go.txt", 24 | Groups: map[string]args.Group{ 25 | "aks.cattle.io": { 26 | Types: []interface{}{ 27 | "./pkg/apis/aks.cattle.io/v1", 28 | }, 29 | GenerateTypes: true, 30 | }, 31 | corev1.GroupName: { 32 | Types: []interface{}{ 33 | corev1.Pod{}, 34 | corev1.Node{}, 35 | corev1.Secret{}, 36 | }, 37 | }, 38 | }, 39 | }) 40 | 41 | aksClusterConfig := newCRD(&aksv1.AKSClusterConfig{}, func(c crd.CRD) crd.CRD { 42 | c.ShortNames = []string{"akscc"} 43 | return c 44 | }) 
45 | 46 | obj, err := aksClusterConfig.ToCustomResourceDefinition() 47 | if err != nil { 48 | panic(err) 49 | } 50 | 51 | obj.(*unstructured.Unstructured).SetAnnotations(map[string]string{ 52 | "helm.sh/resource-policy": "keep", 53 | }) 54 | 55 | aksCCYaml, err := yaml.Export(obj) 56 | if err != nil { 57 | panic(err) 58 | } 59 | 60 | if err := saveCRDYaml("aks-operator-crd", string(aksCCYaml)); err != nil { 61 | panic(err) 62 | } 63 | 64 | fmt.Printf("obj yaml: %s", aksCCYaml) 65 | } 66 | 67 | func newCRD(obj interface{}, customize func(crd.CRD) crd.CRD) crd.CRD { 68 | crd := crd.CRD{ 69 | GVK: schema.GroupVersionKind{ 70 | Group: "aks.cattle.io", 71 | Version: "v1", 72 | }, 73 | Status: true, 74 | SchemaObject: obj, 75 | } 76 | if customize != nil { 77 | crd = customize(crd) 78 | } 79 | return crd 80 | } 81 | 82 | func saveCRDYaml(name, yaml string) error { 83 | filename := fmt.Sprintf("./charts/%s/templates/crds.yaml", name) 84 | save, err := os.Create(filename) 85 | if err != nil { 86 | return err 87 | } 88 | 89 | defer save.Close() 90 | if err := save.Chmod(0755); err != nil { 91 | return err 92 | } 93 | 94 | if _, err := fmt.Fprint(save, yaml); err != nil { 95 | return err 96 | } 97 | 98 | return nil 99 | } 100 | -------------------------------------------------------------------------------- /pkg/generated/controllers/aks.cattle.io/factory.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package aks 20 | 21 | import ( 22 | "github.com/rancher/lasso/pkg/controller" 23 | "github.com/rancher/wrangler/v3/pkg/generic" 24 | "k8s.io/client-go/rest" 25 | ) 26 | 27 | type Factory struct { 28 | *generic.Factory 29 | } 30 | 31 | func NewFactoryFromConfigOrDie(config *rest.Config) *Factory { 32 | f, err := NewFactoryFromConfig(config) 33 | if err != nil { 34 | panic(err) 35 | } 36 | return f 37 | } 38 | 39 | func NewFactoryFromConfig(config *rest.Config) (*Factory, error) { 40 | return NewFactoryFromConfigWithOptions(config, nil) 41 | } 42 | 43 | func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) { 44 | return NewFactoryFromConfigWithOptions(config, &FactoryOptions{ 45 | Namespace: namespace, 46 | }) 47 | } 48 | 49 | type FactoryOptions = generic.FactoryOptions 50 | 51 | func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) { 52 | f, err := generic.NewFactoryFromConfigWithOptions(config, opts) 53 | return &Factory{ 54 | Factory: f, 55 | }, err 56 | } 57 | 58 | func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory { 59 | f, err := NewFactoryFromConfigWithOptions(config, opts) 60 | if err != nil { 61 | panic(err) 62 | } 63 | return f 64 | } 65 | 66 | func (c *Factory) Aks() Interface { 67 | return New(c.ControllerFactory()) 68 | } 69 | 70 | func (c *Factory) WithAgent(userAgent string) Interface { 71 | return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory())) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/generated/controllers/aks.cattle.io/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample 
Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package aks 20 | 21 | import ( 22 | v1 "github.com/rancher/aks-operator/pkg/generated/controllers/aks.cattle.io/v1" 23 | "github.com/rancher/lasso/pkg/controller" 24 | ) 25 | 26 | type Interface interface { 27 | V1() v1.Interface 28 | } 29 | 30 | type group struct { 31 | controllerFactory controller.SharedControllerFactory 32 | } 33 | 34 | // New returns a new Interface. 35 | func New(controllerFactory controller.SharedControllerFactory) Interface { 36 | return &group{ 37 | controllerFactory: controllerFactory, 38 | } 39 | } 40 | 41 | func (g *group) V1() v1.Interface { 42 | return v1.New(g.controllerFactory) 43 | } 44 | -------------------------------------------------------------------------------- /pkg/generated/controllers/aks.cattle.io/v1/aksclusterconfig.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | "context" 23 | "sync" 24 | "time" 25 | 26 | v1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 27 | "github.com/rancher/wrangler/v3/pkg/apply" 28 | "github.com/rancher/wrangler/v3/pkg/condition" 29 | "github.com/rancher/wrangler/v3/pkg/generic" 30 | "github.com/rancher/wrangler/v3/pkg/kv" 31 | "k8s.io/apimachinery/pkg/api/equality" 32 | "k8s.io/apimachinery/pkg/api/errors" 33 | "k8s.io/apimachinery/pkg/runtime" 34 | "k8s.io/apimachinery/pkg/runtime/schema" 35 | ) 36 | 37 | // AKSClusterConfigController interface for managing AKSClusterConfig resources. 38 | type AKSClusterConfigController interface { 39 | generic.ControllerInterface[*v1.AKSClusterConfig, *v1.AKSClusterConfigList] 40 | } 41 | 42 | // AKSClusterConfigClient interface for managing AKSClusterConfig resources in Kubernetes. 43 | type AKSClusterConfigClient interface { 44 | generic.ClientInterface[*v1.AKSClusterConfig, *v1.AKSClusterConfigList] 45 | } 46 | 47 | // AKSClusterConfigCache interface for retrieving AKSClusterConfig resources in memory. 48 | type AKSClusterConfigCache interface { 49 | generic.CacheInterface[*v1.AKSClusterConfig] 50 | } 51 | 52 | // AKSClusterConfigStatusHandler is executed for every added or modified AKSClusterConfig. 
Should return the new status to be updated 53 | type AKSClusterConfigStatusHandler func(obj *v1.AKSClusterConfig, status v1.AKSClusterConfigStatus) (v1.AKSClusterConfigStatus, error) 54 | 55 | // AKSClusterConfigGeneratingHandler is the top-level handler that is executed for every AKSClusterConfig event. It extends AKSClusterConfigStatusHandler by a returning a slice of child objects to be passed to apply.Apply 56 | type AKSClusterConfigGeneratingHandler func(obj *v1.AKSClusterConfig, status v1.AKSClusterConfigStatus) ([]runtime.Object, v1.AKSClusterConfigStatus, error) 57 | 58 | // RegisterAKSClusterConfigStatusHandler configures a AKSClusterConfigController to execute a AKSClusterConfigStatusHandler for every events observed. 59 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 60 | func RegisterAKSClusterConfigStatusHandler(ctx context.Context, controller AKSClusterConfigController, condition condition.Cond, name string, handler AKSClusterConfigStatusHandler) { 61 | statusHandler := &aKSClusterConfigStatusHandler{ 62 | client: controller, 63 | condition: condition, 64 | handler: handler, 65 | } 66 | controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) 67 | } 68 | 69 | // RegisterAKSClusterConfigGeneratingHandler configures a AKSClusterConfigController to execute a AKSClusterConfigGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 
70 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 71 | func RegisterAKSClusterConfigGeneratingHandler(ctx context.Context, controller AKSClusterConfigController, apply apply.Apply, 72 | condition condition.Cond, name string, handler AKSClusterConfigGeneratingHandler, opts *generic.GeneratingHandlerOptions) { 73 | statusHandler := &aKSClusterConfigGeneratingHandler{ 74 | AKSClusterConfigGeneratingHandler: handler, 75 | apply: apply, 76 | name: name, 77 | gvk: controller.GroupVersionKind(), 78 | } 79 | if opts != nil { 80 | statusHandler.opts = *opts 81 | } 82 | controller.OnChange(ctx, name, statusHandler.Remove) 83 | RegisterAKSClusterConfigStatusHandler(ctx, controller, condition, name, statusHandler.Handle) 84 | } 85 | 86 | type aKSClusterConfigStatusHandler struct { 87 | client AKSClusterConfigClient 88 | condition condition.Cond 89 | handler AKSClusterConfigStatusHandler 90 | } 91 | 92 | // sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API 93 | func (a *aKSClusterConfigStatusHandler) sync(key string, obj *v1.AKSClusterConfig) (*v1.AKSClusterConfig, error) { 94 | if obj == nil { 95 | return obj, nil 96 | } 97 | 98 | origStatus := obj.Status.DeepCopy() 99 | obj = obj.DeepCopy() 100 | newStatus, err := a.handler(obj, obj.Status) 101 | if err != nil { 102 | // Revert to old status on error 103 | newStatus = *origStatus.DeepCopy() 104 | } 105 | 106 | if a.condition != "" { 107 | if errors.IsConflict(err) { 108 | a.condition.SetError(&newStatus, "", nil) 109 | } else { 110 | a.condition.SetError(&newStatus, "", err) 111 | } 112 | } 113 | if !equality.Semantic.DeepEqual(origStatus, &newStatus) { 114 | if a.condition != "" { 115 | // Since status has changed, update the lastUpdatedTime 116 | a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) 117 | } 118 | 119 | var newErr error 120 | obj.Status = newStatus 121 | newObj, newErr := a.client.UpdateStatus(obj) 122 | if err == nil { 123 | err = newErr 124 | } 125 | if newErr == nil { 126 | obj = newObj 127 | } 128 | } 129 | return obj, err 130 | } 131 | 132 | type aKSClusterConfigGeneratingHandler struct { 133 | AKSClusterConfigGeneratingHandler 134 | apply apply.Apply 135 | opts generic.GeneratingHandlerOptions 136 | gvk schema.GroupVersionKind 137 | name string 138 | seen sync.Map 139 | } 140 | 141 | // Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied 142 | func (a *aKSClusterConfigGeneratingHandler) Remove(key string, obj *v1.AKSClusterConfig) (*v1.AKSClusterConfig, error) { 143 | if obj != nil { 144 | return obj, nil 145 | } 146 | 147 | obj = &v1.AKSClusterConfig{} 148 | obj.Namespace, obj.Name = kv.RSplit(key, "/") 149 | obj.SetGroupVersionKind(a.gvk) 150 | 151 | if a.opts.UniqueApplyForResourceVersion { 152 | a.seen.Delete(key) 153 | } 154 | 155 | return nil, 
generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 156 | WithOwner(obj). 157 | WithSetID(a.name). 158 | ApplyObjects() 159 | } 160 | 161 | // Handle executes the configured AKSClusterConfigGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource 162 | func (a *aKSClusterConfigGeneratingHandler) Handle(obj *v1.AKSClusterConfig, status v1.AKSClusterConfigStatus) (v1.AKSClusterConfigStatus, error) { 163 | if !obj.DeletionTimestamp.IsZero() { 164 | return status, nil 165 | } 166 | 167 | objs, newStatus, err := a.AKSClusterConfigGeneratingHandler(obj, status) 168 | if err != nil { 169 | return newStatus, err 170 | } 171 | if !a.isNewResourceVersion(obj) { 172 | return newStatus, nil 173 | } 174 | 175 | err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 176 | WithOwner(obj). 177 | WithSetID(a.name). 178 | ApplyObjects(objs...) 179 | if err != nil { 180 | return newStatus, err 181 | } 182 | a.storeResourceVersion(obj) 183 | return newStatus, nil 184 | } 185 | 186 | // isNewResourceVersion detects if a specific resource version was already successfully processed. 
187 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 188 | func (a *aKSClusterConfigGeneratingHandler) isNewResourceVersion(obj *v1.AKSClusterConfig) bool { 189 | if !a.opts.UniqueApplyForResourceVersion { 190 | return true 191 | } 192 | 193 | // Apply once per resource version 194 | key := obj.Namespace + "/" + obj.Name 195 | previous, ok := a.seen.Load(key) 196 | return !ok || previous != obj.ResourceVersion 197 | } 198 | 199 | // storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed 200 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 201 | func (a *aKSClusterConfigGeneratingHandler) storeResourceVersion(obj *v1.AKSClusterConfig) { 202 | if !a.opts.UniqueApplyForResourceVersion { 203 | return 204 | } 205 | 206 | key := obj.Namespace + "/" + obj.Name 207 | a.seen.Store(key, obj.ResourceVersion) 208 | } 209 | -------------------------------------------------------------------------------- /pkg/generated/controllers/aks.cattle.io/v1/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
18 | 19 | package v1 20 | 21 | import ( 22 | v1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 23 | "github.com/rancher/lasso/pkg/controller" 24 | "github.com/rancher/wrangler/v3/pkg/generic" 25 | "github.com/rancher/wrangler/v3/pkg/schemes" 26 | "k8s.io/apimachinery/pkg/runtime/schema" 27 | ) 28 | 29 | func init() { 30 | schemes.Register(v1.AddToScheme) 31 | } 32 | 33 | type Interface interface { 34 | AKSClusterConfig() AKSClusterConfigController 35 | } 36 | 37 | func New(controllerFactory controller.SharedControllerFactory) Interface { 38 | return &version{ 39 | controllerFactory: controllerFactory, 40 | } 41 | } 42 | 43 | type version struct { 44 | controllerFactory controller.SharedControllerFactory 45 | } 46 | 47 | func (v *version) AKSClusterConfig() AKSClusterConfigController { 48 | return generic.NewController[*v1.AKSClusterConfig, *v1.AKSClusterConfigList](schema.GroupVersionKind{Group: "aks.cattle.io", Version: "v1", Kind: "AKSClusterConfig"}, "aksclusterconfigs", true, v.controllerFactory) 49 | } 50 | -------------------------------------------------------------------------------- /pkg/generated/controllers/core/factory.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
18 | 19 | package core 20 | 21 | import ( 22 | "github.com/rancher/lasso/pkg/controller" 23 | "github.com/rancher/wrangler/v3/pkg/generic" 24 | "k8s.io/client-go/rest" 25 | ) 26 | 27 | type Factory struct { 28 | *generic.Factory 29 | } 30 | 31 | func NewFactoryFromConfigOrDie(config *rest.Config) *Factory { 32 | f, err := NewFactoryFromConfig(config) 33 | if err != nil { 34 | panic(err) 35 | } 36 | return f 37 | } 38 | 39 | func NewFactoryFromConfig(config *rest.Config) (*Factory, error) { 40 | return NewFactoryFromConfigWithOptions(config, nil) 41 | } 42 | 43 | func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) { 44 | return NewFactoryFromConfigWithOptions(config, &FactoryOptions{ 45 | Namespace: namespace, 46 | }) 47 | } 48 | 49 | type FactoryOptions = generic.FactoryOptions 50 | 51 | func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) { 52 | f, err := generic.NewFactoryFromConfigWithOptions(config, opts) 53 | return &Factory{ 54 | Factory: f, 55 | }, err 56 | } 57 | 58 | func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory { 59 | f, err := NewFactoryFromConfigWithOptions(config, opts) 60 | if err != nil { 61 | panic(err) 62 | } 63 | return f 64 | } 65 | 66 | func (c *Factory) Core() Interface { 67 | return New(c.ControllerFactory()) 68 | } 69 | 70 | func (c *Factory) WithAgent(userAgent string) Interface { 71 | return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory())) 72 | } 73 | -------------------------------------------------------------------------------- /pkg/generated/controllers/core/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package core 20 | 21 | import ( 22 | v1 "github.com/rancher/aks-operator/pkg/generated/controllers/core/v1" 23 | "github.com/rancher/lasso/pkg/controller" 24 | ) 25 | 26 | type Interface interface { 27 | V1() v1.Interface 28 | } 29 | 30 | type group struct { 31 | controllerFactory controller.SharedControllerFactory 32 | } 33 | 34 | // New returns a new Interface. 35 | func New(controllerFactory controller.SharedControllerFactory) Interface { 36 | return &group{ 37 | controllerFactory: controllerFactory, 38 | } 39 | } 40 | 41 | func (g *group) V1() v1.Interface { 42 | return v1.New(g.controllerFactory) 43 | } 44 | -------------------------------------------------------------------------------- /pkg/generated/controllers/core/v1/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | "github.com/rancher/lasso/pkg/controller" 23 | "github.com/rancher/wrangler/v3/pkg/generic" 24 | "github.com/rancher/wrangler/v3/pkg/schemes" 25 | v1 "k8s.io/api/core/v1" 26 | "k8s.io/apimachinery/pkg/runtime/schema" 27 | ) 28 | 29 | func init() { 30 | schemes.Register(v1.AddToScheme) 31 | } 32 | 33 | type Interface interface { 34 | Node() NodeController 35 | Pod() PodController 36 | Secret() SecretController 37 | } 38 | 39 | func New(controllerFactory controller.SharedControllerFactory) Interface { 40 | return &version{ 41 | controllerFactory: controllerFactory, 42 | } 43 | } 44 | 45 | type version struct { 46 | controllerFactory controller.SharedControllerFactory 47 | } 48 | 49 | func (v *version) Node() NodeController { 50 | return generic.NewNonNamespacedController[*v1.Node, *v1.NodeList](schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, "nodes", v.controllerFactory) 51 | } 52 | 53 | func (v *version) Pod() PodController { 54 | return generic.NewController[*v1.Pod, *v1.PodList](schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, "pods", true, v.controllerFactory) 55 | } 56 | 57 | func (v *version) Secret() SecretController { 58 | return generic.NewController[*v1.Secret, *v1.SecretList](schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}, "secrets", true, v.controllerFactory) 59 | } 60 | -------------------------------------------------------------------------------- /pkg/generated/controllers/core/v1/node.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | "context" 23 | "sync" 24 | "time" 25 | 26 | "github.com/rancher/wrangler/v3/pkg/apply" 27 | "github.com/rancher/wrangler/v3/pkg/condition" 28 | "github.com/rancher/wrangler/v3/pkg/generic" 29 | "github.com/rancher/wrangler/v3/pkg/kv" 30 | v1 "k8s.io/api/core/v1" 31 | "k8s.io/apimachinery/pkg/api/equality" 32 | "k8s.io/apimachinery/pkg/api/errors" 33 | "k8s.io/apimachinery/pkg/runtime" 34 | "k8s.io/apimachinery/pkg/runtime/schema" 35 | ) 36 | 37 | // NodeController interface for managing Node resources. 38 | type NodeController interface { 39 | generic.NonNamespacedControllerInterface[*v1.Node, *v1.NodeList] 40 | } 41 | 42 | // NodeClient interface for managing Node resources in Kubernetes. 43 | type NodeClient interface { 44 | generic.NonNamespacedClientInterface[*v1.Node, *v1.NodeList] 45 | } 46 | 47 | // NodeCache interface for retrieving Node resources in memory. 48 | type NodeCache interface { 49 | generic.NonNamespacedCacheInterface[*v1.Node] 50 | } 51 | 52 | // NodeStatusHandler is executed for every added or modified Node. Should return the new status to be updated 53 | type NodeStatusHandler func(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error) 54 | 55 | // NodeGeneratingHandler is the top-level handler that is executed for every Node event. 
It extends NodeStatusHandler by a returning a slice of child objects to be passed to apply.Apply 56 | type NodeGeneratingHandler func(obj *v1.Node, status v1.NodeStatus) ([]runtime.Object, v1.NodeStatus, error) 57 | 58 | // RegisterNodeStatusHandler configures a NodeController to execute a NodeStatusHandler for every events observed. 59 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 60 | func RegisterNodeStatusHandler(ctx context.Context, controller NodeController, condition condition.Cond, name string, handler NodeStatusHandler) { 61 | statusHandler := &nodeStatusHandler{ 62 | client: controller, 63 | condition: condition, 64 | handler: handler, 65 | } 66 | controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) 67 | } 68 | 69 | // RegisterNodeGeneratingHandler configures a NodeController to execute a NodeGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 70 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 71 | func RegisterNodeGeneratingHandler(ctx context.Context, controller NodeController, apply apply.Apply, 72 | condition condition.Cond, name string, handler NodeGeneratingHandler, opts *generic.GeneratingHandlerOptions) { 73 | statusHandler := &nodeGeneratingHandler{ 74 | NodeGeneratingHandler: handler, 75 | apply: apply, 76 | name: name, 77 | gvk: controller.GroupVersionKind(), 78 | } 79 | if opts != nil { 80 | statusHandler.opts = *opts 81 | } 82 | controller.OnChange(ctx, name, statusHandler.Remove) 83 | RegisterNodeStatusHandler(ctx, controller, condition, name, statusHandler.Handle) 84 | } 85 | 86 | type nodeStatusHandler struct { 87 | client NodeClient 88 | condition condition.Cond 89 | handler NodeStatusHandler 90 | } 91 | 92 | // sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API 93 | func (a *nodeStatusHandler) sync(key string, obj *v1.Node) (*v1.Node, error) { 94 | if obj == nil { 95 | return obj, nil 96 | } 97 | 98 | origStatus := obj.Status.DeepCopy() 99 | obj = obj.DeepCopy() 100 | newStatus, err := a.handler(obj, obj.Status) 101 | if err != nil { 102 | // Revert to old status on error 103 | newStatus = *origStatus.DeepCopy() 104 | } 105 | 106 | if a.condition != "" { 107 | if errors.IsConflict(err) { 108 | a.condition.SetError(&newStatus, "", nil) 109 | } else { 110 | a.condition.SetError(&newStatus, "", err) 111 | } 112 | } 113 | if !equality.Semantic.DeepEqual(origStatus, &newStatus) { 114 | if a.condition != "" { 115 | // Since status has changed, update the lastUpdatedTime 116 | a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) 117 | } 118 | 119 | var newErr error 120 | obj.Status = newStatus 121 | newObj, newErr := a.client.UpdateStatus(obj) 122 | if err == nil { 123 | err = newErr 124 | } 125 | if newErr == nil { 126 | obj = newObj 127 | } 128 | } 129 | return obj, err 130 | } 131 | 132 | type nodeGeneratingHandler struct { 133 | NodeGeneratingHandler 134 | apply apply.Apply 135 | opts generic.GeneratingHandlerOptions 136 | gvk schema.GroupVersionKind 137 | name string 138 | seen sync.Map 139 | } 140 | 141 | // Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied 142 | func (a *nodeGeneratingHandler) Remove(key string, obj *v1.Node) (*v1.Node, error) { 143 | if obj != nil { 144 | return obj, nil 145 | } 146 | 147 | obj = &v1.Node{} 148 | obj.Namespace, obj.Name = kv.RSplit(key, "/") 149 | obj.SetGroupVersionKind(a.gvk) 150 | 151 | if a.opts.UniqueApplyForResourceVersion { 152 | a.seen.Delete(key) 153 | } 154 | 155 | return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 156 | WithOwner(obj). 157 | WithSetID(a.name). 
158 | ApplyObjects() 159 | } 160 | 161 | // Handle executes the configured NodeGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource 162 | func (a *nodeGeneratingHandler) Handle(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error) { 163 | if !obj.DeletionTimestamp.IsZero() { 164 | return status, nil 165 | } 166 | 167 | objs, newStatus, err := a.NodeGeneratingHandler(obj, status) 168 | if err != nil { 169 | return newStatus, err 170 | } 171 | if !a.isNewResourceVersion(obj) { 172 | return newStatus, nil 173 | } 174 | 175 | err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 176 | WithOwner(obj). 177 | WithSetID(a.name). 178 | ApplyObjects(objs...) 179 | if err != nil { 180 | return newStatus, err 181 | } 182 | a.storeResourceVersion(obj) 183 | return newStatus, nil 184 | } 185 | 186 | // isNewResourceVersion detects if a specific resource version was already successfully processed. 187 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 188 | func (a *nodeGeneratingHandler) isNewResourceVersion(obj *v1.Node) bool { 189 | if !a.opts.UniqueApplyForResourceVersion { 190 | return true 191 | } 192 | 193 | // Apply once per resource version 194 | key := obj.Namespace + "/" + obj.Name 195 | previous, ok := a.seen.Load(key) 196 | return !ok || previous != obj.ResourceVersion 197 | } 198 | 199 | // storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed 200 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 201 | func (a *nodeGeneratingHandler) storeResourceVersion(obj *v1.Node) { 202 | if !a.opts.UniqueApplyForResourceVersion { 203 | return 204 | } 205 | 206 | key := obj.Namespace + "/" + obj.Name 207 | a.seen.Store(key, obj.ResourceVersion) 208 | } 209 | -------------------------------------------------------------------------------- 
/pkg/generated/controllers/core/v1/pod.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | "context" 23 | "sync" 24 | "time" 25 | 26 | "github.com/rancher/wrangler/v3/pkg/apply" 27 | "github.com/rancher/wrangler/v3/pkg/condition" 28 | "github.com/rancher/wrangler/v3/pkg/generic" 29 | "github.com/rancher/wrangler/v3/pkg/kv" 30 | v1 "k8s.io/api/core/v1" 31 | "k8s.io/apimachinery/pkg/api/equality" 32 | "k8s.io/apimachinery/pkg/api/errors" 33 | "k8s.io/apimachinery/pkg/runtime" 34 | "k8s.io/apimachinery/pkg/runtime/schema" 35 | ) 36 | 37 | // PodController interface for managing Pod resources. 38 | type PodController interface { 39 | generic.ControllerInterface[*v1.Pod, *v1.PodList] 40 | } 41 | 42 | // PodClient interface for managing Pod resources in Kubernetes. 43 | type PodClient interface { 44 | generic.ClientInterface[*v1.Pod, *v1.PodList] 45 | } 46 | 47 | // PodCache interface for retrieving Pod resources in memory. 48 | type PodCache interface { 49 | generic.CacheInterface[*v1.Pod] 50 | } 51 | 52 | // PodStatusHandler is executed for every added or modified Pod. 
Should return the new status to be updated 53 | type PodStatusHandler func(obj *v1.Pod, status v1.PodStatus) (v1.PodStatus, error) 54 | 55 | // PodGeneratingHandler is the top-level handler that is executed for every Pod event. It extends PodStatusHandler by a returning a slice of child objects to be passed to apply.Apply 56 | type PodGeneratingHandler func(obj *v1.Pod, status v1.PodStatus) ([]runtime.Object, v1.PodStatus, error) 57 | 58 | // RegisterPodStatusHandler configures a PodController to execute a PodStatusHandler for every events observed. 59 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 60 | func RegisterPodStatusHandler(ctx context.Context, controller PodController, condition condition.Cond, name string, handler PodStatusHandler) { 61 | statusHandler := &podStatusHandler{ 62 | client: controller, 63 | condition: condition, 64 | handler: handler, 65 | } 66 | controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) 67 | } 68 | 69 | // RegisterPodGeneratingHandler configures a PodController to execute a PodGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 
70 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 71 | func RegisterPodGeneratingHandler(ctx context.Context, controller PodController, apply apply.Apply, 72 | condition condition.Cond, name string, handler PodGeneratingHandler, opts *generic.GeneratingHandlerOptions) { 73 | statusHandler := &podGeneratingHandler{ 74 | PodGeneratingHandler: handler, 75 | apply: apply, 76 | name: name, 77 | gvk: controller.GroupVersionKind(), 78 | } 79 | if opts != nil { 80 | statusHandler.opts = *opts 81 | } 82 | controller.OnChange(ctx, name, statusHandler.Remove) 83 | RegisterPodStatusHandler(ctx, controller, condition, name, statusHandler.Handle) 84 | } 85 | 86 | type podStatusHandler struct { 87 | client PodClient 88 | condition condition.Cond 89 | handler PodStatusHandler 90 | } 91 | 92 | // sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API 93 | func (a *podStatusHandler) sync(key string, obj *v1.Pod) (*v1.Pod, error) { 94 | if obj == nil { 95 | return obj, nil 96 | } 97 | 98 | origStatus := obj.Status.DeepCopy() 99 | obj = obj.DeepCopy() 100 | newStatus, err := a.handler(obj, obj.Status) 101 | if err != nil { 102 | // Revert to old status on error 103 | newStatus = *origStatus.DeepCopy() 104 | } 105 | 106 | if a.condition != "" { 107 | if errors.IsConflict(err) { 108 | a.condition.SetError(&newStatus, "", nil) 109 | } else { 110 | a.condition.SetError(&newStatus, "", err) 111 | } 112 | } 113 | if !equality.Semantic.DeepEqual(origStatus, &newStatus) { 114 | if a.condition != "" { 115 | // Since status has changed, update the lastUpdatedTime 116 | a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) 117 | } 118 | 119 | var newErr error 120 | obj.Status = newStatus 121 | newObj, newErr := a.client.UpdateStatus(obj) 122 | if err == nil { 123 | err = newErr 124 | } 125 | if newErr == nil { 126 | obj 
= newObj 127 | } 128 | } 129 | return obj, err 130 | } 131 | 132 | type podGeneratingHandler struct { 133 | PodGeneratingHandler 134 | apply apply.Apply 135 | opts generic.GeneratingHandlerOptions 136 | gvk schema.GroupVersionKind 137 | name string 138 | seen sync.Map 139 | } 140 | 141 | // Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied 142 | func (a *podGeneratingHandler) Remove(key string, obj *v1.Pod) (*v1.Pod, error) { 143 | if obj != nil { 144 | return obj, nil 145 | } 146 | 147 | obj = &v1.Pod{} 148 | obj.Namespace, obj.Name = kv.RSplit(key, "/") 149 | obj.SetGroupVersionKind(a.gvk) 150 | 151 | if a.opts.UniqueApplyForResourceVersion { 152 | a.seen.Delete(key) 153 | } 154 | 155 | return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 156 | WithOwner(obj). 157 | WithSetID(a.name). 158 | ApplyObjects() 159 | } 160 | 161 | // Handle executes the configured PodGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource 162 | func (a *podGeneratingHandler) Handle(obj *v1.Pod, status v1.PodStatus) (v1.PodStatus, error) { 163 | if !obj.DeletionTimestamp.IsZero() { 164 | return status, nil 165 | } 166 | 167 | objs, newStatus, err := a.PodGeneratingHandler(obj, status) 168 | if err != nil { 169 | return newStatus, err 170 | } 171 | if !a.isNewResourceVersion(obj) { 172 | return newStatus, nil 173 | } 174 | 175 | err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 176 | WithOwner(obj). 177 | WithSetID(a.name). 178 | ApplyObjects(objs...) 179 | if err != nil { 180 | return newStatus, err 181 | } 182 | a.storeResourceVersion(obj) 183 | return newStatus, nil 184 | } 185 | 186 | // isNewResourceVersion detects if a specific resource version was already successfully processed. 
187 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 188 | func (a *podGeneratingHandler) isNewResourceVersion(obj *v1.Pod) bool { 189 | if !a.opts.UniqueApplyForResourceVersion { 190 | return true 191 | } 192 | 193 | // Apply once per resource version 194 | key := obj.Namespace + "/" + obj.Name 195 | previous, ok := a.seen.Load(key) 196 | return !ok || previous != obj.ResourceVersion 197 | } 198 | 199 | // storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed 200 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 201 | func (a *podGeneratingHandler) storeResourceVersion(obj *v1.Pod) { 202 | if !a.opts.UniqueApplyForResourceVersion { 203 | return 204 | } 205 | 206 | key := obj.Namespace + "/" + obj.Name 207 | a.seen.Store(key, obj.ResourceVersion) 208 | } 209 | -------------------------------------------------------------------------------- /pkg/generated/controllers/core/v1/secret.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
18 | 19 | package v1 20 | 21 | import ( 22 | "github.com/rancher/wrangler/v3/pkg/generic" 23 | v1 "k8s.io/api/core/v1" 24 | ) 25 | 26 | // SecretController interface for managing Secret resources. 27 | type SecretController interface { 28 | generic.ControllerInterface[*v1.Secret, *v1.SecretList] 29 | } 30 | 31 | // SecretClient interface for managing Secret resources in Kubernetes. 32 | type SecretClient interface { 33 | generic.ClientInterface[*v1.Secret, *v1.SecretList] 34 | } 35 | 36 | // SecretCache interface for retrieving Secret resources in memory. 37 | type SecretCache interface { 38 | generic.CacheInterface[*v1.Secret] 39 | } 40 | -------------------------------------------------------------------------------- /pkg/test/cleanup.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | corev1 "k8s.io/api/core/v1" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | kerrors "k8s.io/apimachinery/pkg/util/errors" 11 | "k8s.io/apimachinery/pkg/util/wait" 12 | runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | var ( 16 | cacheSyncBackoff = wait.Backoff{ 17 | Duration: 100 * time.Millisecond, 18 | Factor: 1.5, 19 | Steps: 8, 20 | Jitter: 0.4, 21 | } 22 | ) 23 | 24 | // CleanupAndWait deletes all the given objects and waits for the cache to be updated accordingly. 25 | func CleanupAndWait(ctx context.Context, cl runtimeclient.Client, objs ...runtimeclient.Object) error { 26 | if err := cleanup(ctx, cl, objs...); err != nil { 27 | return err 28 | } 29 | 30 | // Makes sure the cache is updated with the deleted object 31 | errs := []error{} 32 | for _, o := range objs { 33 | // Ignoring namespaces because in testenv the namespace cleaner is not running. 
34 | if o.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("Namespace").GroupKind() { 35 | continue 36 | } 37 | 38 | oCopy := o.DeepCopyObject().(runtimeclient.Object) 39 | key := runtimeclient.ObjectKeyFromObject(o) 40 | err := wait.ExponentialBackoff( 41 | cacheSyncBackoff, 42 | func() (done bool, err error) { 43 | if err := cl.Get(ctx, key, oCopy); err != nil { 44 | if apierrors.IsNotFound(err) { 45 | return true, nil 46 | } 47 | if o.GetName() == "" { // resource is being deleted 48 | return true, nil 49 | } 50 | return false, err 51 | } 52 | return false, nil 53 | }) 54 | errs = append(errs, errors.Wrapf(err, "key %s, %s is not being deleted from the testenv client cache", o.GetObjectKind().GroupVersionKind().String(), key)) 55 | } 56 | return kerrors.NewAggregate(errs) 57 | } 58 | 59 | // cleanup deletes all the given objects. 60 | func cleanup(ctx context.Context, cl runtimeclient.Client, objs ...runtimeclient.Object) error { 61 | errs := []error{} 62 | for _, o := range objs { 63 | copyObj := o.DeepCopyObject().(runtimeclient.Object) 64 | 65 | if err := cl.Get(ctx, runtimeclient.ObjectKeyFromObject(o), copyObj); err != nil { 66 | if apierrors.IsNotFound(err) { 67 | continue 68 | } 69 | if o.GetName() == "" { // resource is being deleted 70 | continue 71 | } 72 | errs = append(errs, err) 73 | continue 74 | } 75 | 76 | // Remove finalizers from the object 77 | if copyObj.GetFinalizers() != nil { 78 | copyObj.SetFinalizers(nil) 79 | } 80 | 81 | err := cl.Update(ctx, copyObj) 82 | if apierrors.IsNotFound(err) { 83 | continue 84 | } 85 | errs = append(errs, err) 86 | 87 | err = cl.Delete(ctx, copyObj) 88 | if apierrors.IsNotFound(err) { 89 | continue 90 | } 91 | errs = append(errs, err) 92 | } 93 | return kerrors.NewAggregate(errs) 94 | } 95 | -------------------------------------------------------------------------------- /pkg/test/envtest.go: -------------------------------------------------------------------------------- 1 
| package test 2 | 3 | import ( 4 | "errors" 5 | "path" 6 | goruntime "runtime" 7 | 8 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 9 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 12 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 13 | "k8s.io/client-go/rest" 14 | runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/envtest" 16 | ) 17 | 18 | var ( 19 | scheme = runtime.NewScheme() 20 | ) 21 | 22 | func init() { 23 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 24 | utilruntime.Must(aksv1.AddToScheme(scheme)) 25 | } 26 | 27 | func StartEnvTest(testEnv *envtest.Environment) (*rest.Config, runtimeclient.Client, error) { 28 | // Get the root of the current file to use in CRD paths. 29 | _, filename, _, _ := goruntime.Caller(0) //nolint:dogsled 30 | root := path.Join(path.Dir(filename), "..", "..", "..", "aks-operator") 31 | 32 | testEnv.CRDs = []*apiextensionsv1.CustomResourceDefinition{ 33 | // Add later if needed. 
34 | } 35 | testEnv.CRDDirectoryPaths = []string{ 36 | path.Join(root, "charts", "aks-operator-crd", "templates"), 37 | } 38 | testEnv.ErrorIfCRDPathMissing = true 39 | 40 | cfg, err := testEnv.Start() 41 | if err != nil { 42 | return nil, nil, err 43 | } 44 | 45 | if cfg == nil { 46 | return nil, nil, errors.New("envtest.Environment.Start() returned nil config") 47 | } 48 | 49 | cl, err := runtimeclient.New(cfg, runtimeclient.Options{Scheme: scheme}) 50 | if err != nil { 51 | return nil, nil, err 52 | } 53 | 54 | return cfg, cl, nil 55 | } 56 | 57 | // StopEnvTest tears down the envtest control plane started by StartEnvTest. 58 | func StopEnvTest(testEnv *envtest.Environment) error { 59 | return testEnv.Stop() 60 | } 61 | -------------------------------------------------------------------------------- /pkg/utils/azure.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "net/http" 5 | 6 | "github.com/Azure/go-autorest/autorest" 7 | "github.com/Azure/go-autorest/autorest/azure" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | // RequestWithInspection logs the request URL and method before sending the request to Azure API server for processing. 12 | func RequestWithInspection() autorest.PrepareDecorator { 13 | return func(p autorest.Preparer) autorest.Preparer { 14 | return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { 15 | // Use WithFields so method/request are emitted as structured fields; 16 | // logrus.Info(msg, fields) would Sprint the map into the message instead. 17 | logrus.WithFields(logrus.Fields{ 18 | "method": r.Method, 19 | "request": r.URL.String(), 20 | }).Info("Azure request") 21 | return p.Prepare(r) 22 | }) 23 | } 24 | } 25 | 26 | // ResponseWithInspection logs the response status, request URL, and request ID after receiving the response from Azure API server.
25 | func ResponseWithInspection() autorest.RespondDecorator { 26 | return func(r autorest.Responder) autorest.Responder { 27 | return autorest.ResponderFunc(func(resp *http.Response) error { 28 | // Use WithFields so these are structured fields; logrus.Info(msg, fields) 29 | // would Sprint the map into the message instead of attaching fields. 30 | logrus.WithFields(logrus.Fields{ 31 | "status": resp.Status, 32 | "method": resp.Request.Method, 33 | "request": resp.Request.URL.String(), 34 | "x-ms-request-id": azure.ExtractRequestID(resp), 35 | }).Info("Azure response") 36 | return r.Respond(resp) 37 | }) 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /pkg/utils/convert.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" 4 | 5 | // ConvertToSliceOfPointers converts a *[]T into a []*T of element pointers. 6 | // A nil input yields an empty, non-nil slice (matching the previous behavior). 7 | func ConvertToSliceOfPointers[T any](ptrToSlice *[]T) []*T { 8 | if ptrToSlice == nil { 9 | return make([]*T, 0) 10 | } 11 | 12 | // Pre-size to avoid repeated growth while appending. 13 | ret := make([]*T, 0, len(*ptrToSlice)) 14 | for _, v := range *ptrToSlice { 15 | ret = append(ret, to.Ptr(v)) 16 | } 17 | 18 | return ret 19 | } 20 | 21 | // ConvertToPointerOfSlice converts a []*T into a *[]T by dereferencing each element. 22 | // A nil input yields nil; elements are assumed non-nil (a nil element would panic, as before). 23 | func ConvertToPointerOfSlice[T any](sliceToPtr []*T) *[]T { 24 | if sliceToPtr == nil { 25 | // Check nil before allocating; the old code allocated a slice it then discarded. 26 | return nil 27 | } 28 | 29 | ret := make([]T, 0, len(sliceToPtr)) 30 | for _, v := range sliceToPtr { 31 | ret = append(ret, *v) 32 | } 33 | 34 | return to.Ptr(ret) 35 | } 36 | -------------------------------------------------------------------------------- /pkg/utils/map.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "fmt" 5 | 6 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 7 | ) 8 | 9 | // BuildNodePoolMap indexes node pools by name, rejecting duplicate names. 10 | func BuildNodePoolMap(nodePools []aksv1.AKSNodePool, clusterName string) (map[string]*aksv1.AKSNodePool, error) { 11 | ret := make(map[string]*aksv1.AKSNodePool, len(nodePools)) 12 | for i := range nodePools { 13 | if nodePools[i].Name != nil { 14 | if _, ok := ret[*nodePools[i].Name]; ok { 15 | return nil, fmt.Errorf("cluster [%s] cannot have multiple nodepools with name %s", clusterName, *nodePools[i].Name) 16 | } 17 |
ret[*nodePools[i].Name] = &nodePools[i] 17 | } 18 | } 19 | return ret, nil 20 | } 21 | -------------------------------------------------------------------------------- /pkg/utils/parse.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | func ParseSecretName(ref string) (namespace string, name string) { 8 | parts := strings.SplitN(ref, ":", 2) 9 | if len(parts) == 1 { 10 | return "", parts[0] 11 | } 12 | return parts[0], parts[1] 13 | } 14 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var ( 4 | GitCommit string 5 | Version string 6 | ) 7 | -------------------------------------------------------------------------------- /scripts/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source $(dirname $0)/version 4 | 5 | cd $(dirname $0)/.. 6 | 7 | mkdir -p bin 8 | if [ "$(uname)" = "Linux" ]; then 9 | OTHER_LINKFLAGS="-extldflags -static -s" 10 | fi 11 | CGO_ENABLED=0 go build -ldflags "$OTHER_LINKFLAGS" -o bin/aks-operator 12 | -------------------------------------------------------------------------------- /scripts/ci: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd $(dirname $0) 6 | 7 | ./validate 8 | ./build 9 | ./package 10 | -------------------------------------------------------------------------------- /scripts/entry: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | mkdir -p bin dist 5 | if [ -e ./scripts/$1 ]; then 6 | ./scripts/"$@" 7 | else 8 | exec "$@" 9 | fi 10 | 11 | chown -R $DAPPER_UID:$DAPPER_GID . 
12 | -------------------------------------------------------------------------------- /scripts/go_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | if [ -z "${1}" ]; then 8 | echo "must provide module as first parameter" 9 | exit 1 10 | fi 11 | 12 | if [ -z "${2}" ]; then 13 | echo "must provide binary name as second parameter" 14 | exit 1 15 | fi 16 | 17 | if [ -z "${3}" ]; then 18 | echo "must provide version as third parameter" 19 | exit 1 20 | fi 21 | 22 | if [ -z "${GOBIN}" ]; then 23 | echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory." 24 | exit 1 25 | fi 26 | 27 | rm "${GOBIN}/${2}"* 2> /dev/null || true 28 | 29 | # install the golang module specified as the first argument 30 | go install -tags tools "${1}@${3}" 31 | mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}" 32 | ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}" 33 | -------------------------------------------------------------------------------- /scripts/package: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | source $(dirname $0)/version 5 | 6 | cd $(dirname $0)/.. 7 | 8 | mkdir -p dist/artifacts 9 | cp bin/aks-operator dist/artifacts/aks-operator-linux${SUFFIX} 10 | for i in bin/aks-operator-*; do 11 | if [ -e "$i" ]; then 12 | cp $i dist/artifacts 13 | fi 14 | done 15 | 16 | ./scripts/package-helm 17 | -------------------------------------------------------------------------------- /scripts/package-helm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if ! hash helm 2>/dev/null; then 5 | exit 0 6 | fi 7 | 8 | cd $(dirname $0)/.. 9 | . 
./scripts/version 10 | 11 | rm -rf build/charts 12 | mkdir -p build dist/artifacts 13 | cp -rf charts build/ 14 | 15 | sed -i \ 16 | -e 's/^version:.*/version: '${HELM_VERSION}'/' \ 17 | -e 's/appVersion:.*/appVersion: '${HELM_VERSION}'/' \ 18 | build/charts/aks-operator/Chart.yaml 19 | 20 | sed -i \ 21 | -e 's/tag:.*/tag: '${HELM_TAG}'/' \ 22 | build/charts/aks-operator/values.yaml 23 | 24 | sed -i \ 25 | -e 's/^version:.*/version: '${HELM_VERSION}'/' \ 26 | -e 's/appVersion:.*/appVersion: '${HELM_VERSION}'/' \ 27 | build/charts/aks-operator-crd/Chart.yaml 28 | 29 | helm package -d ./dist/artifacts ./build/charts/aks-operator 30 | helm package -d ./dist/artifacts ./build/charts/aks-operator-crd 31 | -------------------------------------------------------------------------------- /scripts/setup-kind-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | KUBE_VERSION=${KUBE_VERSION:-v1.32.2} 6 | CLUSTER_NAME="${CLUSTER_NAME:-operator-e2e}" 7 | 8 | if ! 
kind get clusters | grep "$CLUSTER_NAME"; then 9 | cat << EOF > kind.config 10 | kind: Cluster 11 | apiVersion: kind.x-k8s.io/v1alpha4 12 | nodes: 13 | - role: control-plane 14 | image: kindest/node:$KUBE_VERSION 15 | kubeadmConfigPatches: 16 | - | 17 | kind: InitConfiguration 18 | nodeRegistration: 19 | kubeletExtraArgs: 20 | node-labels: "ingress-ready=true" 21 | EOF 22 | kind create cluster --name $CLUSTER_NAME --config kind.config 23 | rm -rf kind.config 24 | fi 25 | 26 | kubectl cluster-info --context kind-$CLUSTER_NAME 27 | echo "Sleep to give times to node to populate with all info" 28 | kubectl wait --for=condition=Ready node/$CLUSTER_NAME-control-plane 29 | # Label the nodes with node-role.kubernetes.io/master as it appears that 30 | # label is no longer added on >=1.24.X clusters while it was set on <=1.23.X 31 | # https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint 32 | # https://kubernetes.io/blog/2022/04/07/upcoming-changes-in-kubernetes-1-24/#api-removals-deprecations-and-other-changes-for-kubernetes-1-24 33 | # system-upgrade-controller 0.9.1 still uses it to schedule pods 34 | kubectl label nodes --all node-role.kubernetes.io/master= 35 | kubectl get nodes -o wide 36 | -------------------------------------------------------------------------------- /scripts/validate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd $(dirname $0)/.. 6 | 7 | if ! 
command -v golangci-lint; then 8 | echo Skipping validation: no golangci-lint available 9 | exit 10 | fi 11 | 12 | echo 'Running: golangci-lint' 13 | golangci-lint run 14 | 15 | echo 'Running: go mod verify' 16 | go mod verify 17 | 18 | echo 'Running: go fmt' 19 | # Format every package in the module; a bare `go fmt` only covers the repo root. 20 | go fmt ./... 21 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 22 | echo 'go fmt produced differences' 23 | exit 1 24 | fi 25 | 26 | echo 'Running: go generate' 27 | # Run generators in every package in the module, not just the repo root. 28 | go generate ./... 29 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 30 | echo 'go generate produced differences' 31 | exit 1 32 | fi 33 | 34 | echo 'Running: go mod tidy' 35 | go mod tidy 36 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 37 | echo 'go mod tidy produced differences' 38 | exit 1 39 | fi 40 | 41 | echo 'Running: go test' 42 | go test -cover -tags=test ./pkg/aks/... 43 | -------------------------------------------------------------------------------- /scripts/version: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 4 | DIRTY="-dirty" 5 | fi 6 | 7 | COMMIT=$(git rev-parse --short HEAD) 8 | GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)} 9 | 10 | if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then 11 | VERSION=$GIT_TAG 12 | else 13 | VERSION="${COMMIT}${DIRTY}" 14 | fi 15 | 16 | if [ -z "$ARCH" ]; then 17 | ARCH=$(go env GOHOSTARCH) 18 | fi 19 | 20 | SUFFIX="-${ARCH}" 21 | 22 | HELM_TAG=${TAG:-${VERSION}} 23 | HELM_VERSION=${HELM_TAG/v/} 24 | TAG=${TAG:-${VERSION}${SUFFIX}} 25 | REPO=${REPO:-rancher} 26 | 27 | if echo $TAG | grep -q dirty; then 28 | TAG=dev 29 | HELM_TAG=dev 30 | HELM_VERSION=0.0.0-dev 31 | fi 32 | -------------------------------------------------------------------------------- /test/e2e/Dockerfile.e2e: -------------------------------------------------------------------------------- 1 | FROM registry.suse.com/bci/golang:1.23 AS build 2 |
RUN zypper -n install -l openssl-devel 3 | WORKDIR /src 4 | COPY go.mod go.sum /src/ 5 | RUN go mod download 6 | COPY main.go /src/ 7 | COPY controller /src/controller 8 | COPY pkg /src/pkg 9 | FROM build AS build-operator 10 | ARG TAG=v0.0.0 11 | ARG COMMIT="" 12 | ARG COMMITDATE="" 13 | ENV CGO_ENABLED=0 14 | RUN go build \ 15 | -ldflags "-w -s \ 16 | -X github.com/rancher/aks-operator/pkg/version.Version=$TAG \ 17 | -X github.com/rancher/aks-operator/pkg/version.Commit=$COMMIT \ 18 | -X github.com/rancher/aks-operator/pkg/version.CommitDate=$COMMITDATE" \ 19 | -o /usr/sbin/aks-operator . 20 | 21 | FROM scratch AS aks-operator 22 | COPY --from=build /var/lib/ca-certificates/ca-bundle.pem /etc/ssl/certs/ca-certificates.crt 23 | COPY --from=build-operator /usr/sbin/aks-operator /usr/sbin/aks-operator 24 | ENTRYPOINT ["/usr/sbin/aks-operator"] 25 | 26 | -------------------------------------------------------------------------------- /test/e2e/basic_cluster_test.go: -------------------------------------------------------------------------------- 1 | package e2e 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/Azure/azure-sdk-for-go/sdk/azcore/to" 7 | . "github.com/onsi/ginkgo/v2" 8 | . 
"github.com/onsi/gomega" 9 | aksv1 "github.com/rancher/aks-operator/pkg/apis/aks.cattle.io/v1" 10 | managementv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | 13 | runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" 14 | ) 15 | 16 | var _ = Describe("BasicCluster", func() { 17 | var aksConfig *aksv1.AKSClusterConfig 18 | var cluster *managementv3.Cluster 19 | 20 | BeforeEach(func() { 21 | var ok bool 22 | aksConfig, ok = clusterTemplates[basicClusterTemplateName] 23 | Expect(ok).To(BeTrue()) 24 | Expect(aksConfig).NotTo(BeNil()) 25 | 26 | cluster = &managementv3.Cluster{ 27 | ObjectMeta: metav1.ObjectMeta{ 28 | Name: aksConfig.Name, 29 | }, 30 | Spec: managementv3.ClusterSpec{ 31 | AKSConfig: &aksConfig.Spec, 32 | }, 33 | } 34 | 35 | }) 36 | 37 | It("Succesfully creates a cluster", func() { 38 | By("Creating a cluster") 39 | Expect(cl.Create(ctx, cluster)).Should(Succeed()) 40 | 41 | By("Waiting for cluster to be ready") 42 | Eventually(func() error { 43 | currentCluster := &aksv1.AKSClusterConfig{} 44 | 45 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 46 | Name: cluster.Name, 47 | Namespace: aksClusterConfigNamespace, 48 | }, currentCluster); err != nil { 49 | return err 50 | } 51 | 52 | if currentCluster.Status.Phase == "active" { 53 | return nil 54 | } 55 | 56 | return fmt.Errorf("cluster is not ready yet. 
Current phase: %s", currentCluster.Status.Phase) 57 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 58 | }) 59 | 60 | It("Successfully adds and removes a node pool", func() { 61 | initialNodePools := aksConfig.DeepCopy().Spec.NodePools // save to restore later and test deletion 62 | 63 | Expect(cl.Get(ctx, runtimeclient.ObjectKey{Name: cluster.Name}, cluster)).Should(Succeed()) 64 | patch := runtimeclient.MergeFrom(cluster.DeepCopy()) 65 | 66 | nodePool := aksv1.AKSNodePool{ 67 | Name: to.Ptr("pool1"), 68 | AvailabilityZones: to.Ptr([]string{"1", "2", "3"}), 69 | MaxPods: to.Ptr(int32(110)), 70 | Count: to.Ptr(int32(1)), 71 | Mode: "User", 72 | OrchestratorVersion: cluster.Spec.AKSConfig.KubernetesVersion, 73 | OsDiskSizeGB: to.Ptr(int32(128)), 74 | OsDiskType: "Linux", 75 | VMSize: "Standard_D2_v2", 76 | } 77 | cluster.Spec.AKSConfig.NodePools = append(cluster.Spec.AKSConfig.NodePools, nodePool) 78 | 79 | Expect(cl.Patch(ctx, cluster, patch)).Should(Succeed()) 80 | 81 | By("Waiting for cluster to start adding node pool") 82 | Eventually(func() error { 83 | currentCluster := &aksv1.AKSClusterConfig{} 84 | 85 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 86 | Name: cluster.Name, 87 | Namespace: aksClusterConfigNamespace, 88 | }, currentCluster); err != nil { 89 | return err 90 | } 91 | 92 | if currentCluster.Status.Phase == "updating" && len(currentCluster.Spec.NodePools) == 2 { 93 | return nil 94 | } 95 | 96 | return fmt.Errorf("cluster didn't get new node pool. 
Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 97 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 98 | 99 | By("Waiting for cluster to finish adding node pool") 100 | Eventually(func() error { 101 | currentCluster := &aksv1.AKSClusterConfig{} 102 | 103 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 104 | Name: cluster.Name, 105 | Namespace: aksClusterConfigNamespace, 106 | }, currentCluster); err != nil { 107 | return err 108 | } 109 | 110 | if currentCluster.Status.Phase == "active" && len(currentCluster.Spec.NodePools) == 2 { 111 | return nil 112 | } 113 | 114 | return fmt.Errorf("cluster didn't finish adding node pool. Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 115 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 116 | 117 | By("Restoring initial node pools") 118 | 119 | Expect(cl.Get(ctx, runtimeclient.ObjectKey{Name: cluster.Name}, cluster)).Should(Succeed()) 120 | patch = runtimeclient.MergeFrom(cluster.DeepCopy()) 121 | 122 | cluster.Spec.AKSConfig.NodePools = initialNodePools 123 | 124 | Expect(cl.Patch(ctx, cluster, patch)).Should(Succeed()) 125 | 126 | By("Waiting for cluster to start removing node pool") 127 | Eventually(func() error { 128 | currentCluster := &aksv1.AKSClusterConfig{} 129 | 130 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 131 | Name: cluster.Name, 132 | Namespace: aksClusterConfigNamespace, 133 | }, currentCluster); err != nil { 134 | return err 135 | } 136 | 137 | if currentCluster.Status.Phase == "updating" && len(currentCluster.Spec.NodePools) == 1 { 138 | return nil 139 | } 140 | 141 | return fmt.Errorf("cluster didn't start removing node pool. 
Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 142 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 143 | 144 | By("Waiting for cluster to finish removing node pool") 145 | Eventually(func() error { 146 | currentCluster := &aksv1.AKSClusterConfig{} 147 | 148 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 149 | Name: cluster.Name, 150 | Namespace: aksClusterConfigNamespace, 151 | }, currentCluster); err != nil { 152 | return err 153 | } 154 | 155 | if currentCluster.Status.Phase == "active" && len(currentCluster.Spec.NodePools) == 1 { 156 | return nil 157 | } 158 | 159 | return fmt.Errorf("cluster didn't finish removing node pool. Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 160 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 161 | 162 | By("Done waiting for cluster to finish removing node pool") 163 | }) 164 | }) 165 | -------------------------------------------------------------------------------- /test/e2e/config/config.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2022 SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package config 18 | 19 | import ( 20 | "errors" 21 | "fmt" 22 | "os" 23 | 24 | "github.com/drone/envsubst/v2" 25 | "sigs.k8s.io/yaml" 26 | ) 27 | 28 | type E2EConfig struct { 29 | OperatorChart string `yaml:"operatorChart"` 30 | CRDChart string `yaml:"crdChart"` 31 | ExternalIP string `yaml:"externalIP"` 32 | MagicDNS string `yaml:"magicDNS"` 33 | BridgeIP string `yaml:"bridgeIP"` 34 | ArtifactsDir string `yaml:"artifactsDir"` 35 | 36 | CertManagerVersion string `yaml:"certManagerVersion"` 37 | CertManagerChartURL string `yaml:"certManagerChartURL"` 38 | 39 | RancherVersion string `yaml:"rancherVersion"` 40 | RancherChartURL string `yaml:"rancherChartURL"` 41 | 42 | AzureClientID string `yaml:"azureClientID"` 43 | AzureClientSecret string `yaml:"azureClientSecret"` 44 | AzureSubscriptionID string `yaml:"azureSubscriptionID"` 45 | 46 | AzureResourceGroup string `yaml:"resourceGroupPrefix"` 47 | } 48 | 49 | // ReadE2EConfig read config from yaml and substitute variables using envsubst. 50 | // All variables can be overridden by environmental variables. 
51 | func ReadE2EConfig(configPath string) (*E2EConfig, error) { //nolint:gocyclo 52 | config := &E2EConfig{} 53 | 54 | configData, err := os.ReadFile(configPath) 55 | if err != nil { 56 | return nil, fmt.Errorf("failed to read config file: %w", err) 57 | } 58 | 59 | // os.ReadFile returns a non-nil empty slice for an empty file, so check length, not nil. 60 | if len(configData) == 0 { 61 | return nil, errors.New("config file can't be empty") 62 | } 63 | 64 | if err := yaml.Unmarshal(configData, config); err != nil { 65 | return nil, fmt.Errorf("failed to unmarshal config file: %w", err) 66 | } 67 | 68 | if operatorChart := os.Getenv("OPERATOR_CHART"); operatorChart != "" { 69 | config.OperatorChart = operatorChart 70 | } 71 | 72 | if config.OperatorChart == "" { 73 | return nil, errors.New("no OPERATOR_CHART provided, an operator helm chart is required to run e2e tests") 74 | } 75 | 76 | if crdChart := os.Getenv("CRD_CHART"); crdChart != "" { 77 | config.CRDChart = crdChart 78 | } 79 | 80 | if config.CRDChart == "" { 81 | return nil, errors.New("no CRD_CHART provided, a crd helm chart is required to run e2e tests") 82 | } 83 | 84 | if externalIP := os.Getenv("EXTERNAL_IP"); externalIP != "" { 85 | config.ExternalIP = externalIP 86 | } 87 | 88 | if config.ExternalIP == "" { 89 | return nil, errors.New("no EXTERNAL_IP provided, a known (reachable) node external ip it is required to run e2e tests") 90 | } 91 | 92 | if magicDNS := os.Getenv("MAGIC_DNS"); magicDNS != "" { 93 | config.MagicDNS = magicDNS 94 | } 95 | 96 | if bridgeIP := os.Getenv("BRIDGE_IP"); bridgeIP != "" { 97 | config.BridgeIP = bridgeIP 98 | } 99 | 100 | if artifactsDir := os.Getenv("ARTIFACTS_DIR"); artifactsDir != "" { 101 | config.ArtifactsDir = artifactsDir 102 | } 103 | 104 | if azureClientID := os.Getenv("AZURE_CLIENT_ID"); azureClientID != "" { 105 | config.AzureClientID = azureClientID 106 | } 107 | 108 | if azureClientSecret := os.Getenv("AZURE_CLIENT_SECRET"); azureClientSecret != "" { 109 | config.AzureClientSecret = azureClientSecret 110 | } 111 | 112 | if azureSubscriptionID :=
os.Getenv("AZURE_SUBSCRIPTION_ID"); azureSubscriptionID != "" { 112 | config.AzureSubscriptionID = azureSubscriptionID 113 | } 114 | 115 | if azureResourceGroup := os.Getenv("AZURE_RESOURCE_GROUP"); azureResourceGroup != "" { 116 | config.AzureResourceGroup = azureResourceGroup 117 | } 118 | 119 | if certManagerVersion := os.Getenv("CERT_MANAGER_VERSION"); certManagerVersion != "" { 120 | config.CertManagerVersion = certManagerVersion 121 | } 122 | 123 | if certManagerURL := os.Getenv("CERT_MANAGER_CHART_URL"); certManagerURL != "" { 124 | config.CertManagerChartURL = certManagerURL 125 | } 126 | 127 | if rancherVersion := os.Getenv("RANCHER_VERSION"); rancherVersion != "" { 128 | config.RancherVersion = rancherVersion 129 | } 130 | 131 | if rancherURL := os.Getenv("RANCHER_CHART_URL"); rancherURL != "" { 132 | config.RancherChartURL = rancherURL 133 | } 134 | 135 | if err := substituteVersions(config); err != nil { 136 | return nil, err 137 | } 138 | 139 | return config, validateAzureCredentials(config) 140 | } 141 | 142 | func substituteVersions(config *E2EConfig) error { 143 | certManagerURL, err := envsubst.Eval(config.CertManagerChartURL, func(_ string) string { 144 | return config.CertManagerVersion 145 | }) 146 | if err != nil { 147 | return fmt.Errorf("failed to substitute cert manager chart url: %w", err) 148 | } 149 | config.CertManagerChartURL = certManagerURL 150 | 151 | rancherURL, err := envsubst.Eval(config.RancherChartURL, func(_ string) string { 152 | return config.RancherVersion 153 | }) 154 | if err != nil { 155 | return fmt.Errorf("failed to substitute rancher chart url: %w", err) 156 | } 157 | config.RancherChartURL = rancherURL 158 | 159 | return nil 160 | } 161 | 162 | func validateAzureCredentials(config *E2EConfig) error { 163 | if config.AzureClientID == "" { 164 | return errors.New("no AZURE_CLIENT_ID provided, an azure client id is required to run e2e tests") 165 | } 166 | 167 | if config.AzureClientSecret == "" { 168 | return 
errors.New("no AZURE_CLIENT_SECRET provided, an azure client secret is required to run e2e tests") 169 | } 170 | 171 | if config.AzureSubscriptionID == "" { 172 | return errors.New("no AZURE_SUBSCRIPTION_ID provided, an azure subscription id is required to run e2e tests") 173 | } 174 | 175 | return nil 176 | } 177 | -------------------------------------------------------------------------------- /test/e2e/config/config.yaml: -------------------------------------------------------------------------------- 1 | # E2E Tests config 2 | 3 | magicDNS: sslip.io 4 | bridgeIP: 172.17.0.1 5 | operatorReplicas: 1 6 | artifactsDir: ../../_artifacts 7 | 8 | certManagerVersion: v1.11.1 9 | certManagerChartURL: https://charts.jetstack.io/charts/cert-manager-${CERT_MANAGER_VERSION}.tgz 10 | 11 | rancherVersion: v2.9-head 12 | rancherChartURL: https://releases.rancher.com/server-charts/latest/ 13 | -------------------------------------------------------------------------------- /test/e2e/deploy_operator_test.go: -------------------------------------------------------------------------------- 1 | package e2e 2 | 3 | import ( 4 | . 
"github.com/onsi/ginkgo/v2" 5 | ) 6 | 7 | var _ = Describe("Do nothing, used to deploy rancher and operator", Label("do-nothing"), func() { 8 | It("Does nothing", func() {}) 9 | }) 10 | -------------------------------------------------------------------------------- /test/e2e/templates/basic-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: aks.cattle.io/v1 2 | kind: AKSClusterConfig 3 | metadata: 4 | namespace: default 5 | spec: 6 | azureCredentialSecret: default:azure-credentials 7 | clusterName: cluster 8 | dnsPrefix: basic-cluster-dns 9 | imported: false 10 | kubernetesVersion: 1.28.9 11 | linuxAdminUsername: azureuser 12 | loadBalancerSku: Standard 13 | networkPlugin: kubenet 14 | nodePools: 15 | - availabilityZones: 16 | - "1" 17 | - "2" 18 | - "3" 19 | count: 1 20 | enableAutoScaling: false 21 | maxPods: 110 22 | mode: System 23 | name: agentpool 24 | orchestratorVersion: 1.28.9 25 | osDiskSizeGB: 30 26 | osDiskType: Managed 27 | osType: Linux 28 | vmSize: Standard_B4ms 29 | privateCluster: false 30 | resourceLocation: eastus 31 | 32 | --------------------------------------------------------------------------------