├── .github ├── PULL_REQUEST_TEMPLATE.md ├── dependabot.yaml ├── scripts │ ├── update-rancher-charts.sh │ └── update-rancher-dep.sh └── workflows │ ├── apidiff.yaml │ ├── build.yaml │ ├── delete-old-versions.yaml │ ├── e2e-branch.yaml │ ├── e2e-workflow.yaml │ ├── lint.yaml │ ├── nightly-publish.yaml │ ├── release.yaml │ ├── scan.yaml │ ├── unit.yaml │ └── verify.yaml ├── .gitignore ├── .golangci.yml ├── .goreleaser.yaml ├── CODEOWNERS ├── License ├── Makefile ├── README.md ├── charts ├── gke-operator-crd │ ├── Chart.yaml │ └── templates │ │ └── crds.yaml └── gke-operator │ ├── Chart.yaml │ ├── templates │ ├── NOTES.txt │ ├── _helpers.tpl │ ├── clusterrole.yaml │ ├── clusterrolebinding.yaml │ ├── deployment.yaml │ └── serviceaccount.yaml │ └── values.yaml ├── controller ├── external.go ├── gke-cluster-config-handler.go ├── gke-cluster-config-handler_test.go └── suite_test.go ├── examples ├── cluster-autopilot.yaml ├── cluster-basic.yaml ├── cluster-full.yaml ├── cluster-registered.json ├── cluster-registered.yaml └── cluster.json ├── go.mod ├── go.sum ├── main.go ├── package ├── Dockerfile └── entrypoint.sh ├── pkg ├── apis │ └── gke.cattle.io │ │ ├── v1 │ │ ├── doc.go │ │ ├── types.go │ │ ├── zz_generated_deepcopy.go │ │ ├── zz_generated_list_types.go │ │ └── zz_generated_register.go │ │ └── zz_generated_register.go ├── codegen │ ├── boilerplate.go.txt │ ├── cleanup │ │ └── main.go │ └── main.go ├── generated │ └── controllers │ │ ├── core │ │ ├── factory.go │ │ ├── interface.go │ │ └── v1 │ │ │ ├── interface.go │ │ │ ├── node.go │ │ │ ├── pod.go │ │ │ └── secret.go │ │ └── gke.cattle.io │ │ ├── factory.go │ │ ├── interface.go │ │ └── v1 │ │ ├── gkeclusterconfig.go │ │ └── interface.go ├── gke │ ├── client.go │ ├── consts.go │ ├── create.go │ ├── create_test.go │ ├── delete.go │ ├── delete_test.go │ ├── relative_resource_name.go │ ├── services │ │ ├── gke.go │ │ └── mock_services │ │ │ ├── doc.go │ │ │ └── gke_mock.go │ ├── suite_test.go │ ├── update.go │ └── 
update_test.go ├── test │ ├── cleanup.go │ └── envtest.go ├── utils │ └── parse.go └── version │ └── version.go ├── scripts ├── build ├── ci ├── go_install.sh ├── package ├── package-helm ├── setup-kind-cluster.sh ├── validate └── version └── test └── e2e ├── Dockerfile.e2e ├── basic_cluster_test.go ├── config ├── config.go └── config.yaml ├── deploy_operator_test.go ├── suite_test.go └── templates └── basic-cluster.yaml /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 10 | 11 | **What this PR does / why we need it**: 12 | 13 | 14 | 15 | **Which issue(s) this PR fixes** 16 | Issue # 17 | 18 | **Special notes for your reviewer**: 19 | 20 | **Checklist**: 21 | 22 | 23 | - [ ] squashed commits into logical changes 24 | - [ ] includes documentation 25 | - [ ] adds unit tests 26 | - [ ] adds or updates e2e tests 27 | - [ ] backport needed 28 | -------------------------------------------------------------------------------- /.github/dependabot.yaml: -------------------------------------------------------------------------------- 1 | # Please see the documentation for all configuration options: 2 | # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 3 | version: 2 4 | updates: 5 | # GitHub Actions 6 | - package-ecosystem: "github-actions" 7 | directory: "/" 8 | schedule: 9 | interval: "weekly" 10 | commit-message: 11 | prefix: ":seedling:" 12 | # Go modules in main branch 13 | - package-ecosystem: "gomod" 14 | directory: "/" 15 | schedule: 16 | interval: "weekly" 17 | ignore: 18 | # Ignore controller-runtime as its upgraded manually. 19 | - dependency-name: "sigs.k8s.io/controller-runtime" 20 | # Ignore k8s and its transitives modules as they are upgraded manually 21 | # together with controller-runtime. 
22 | - dependency-name: "k8s.io/*" 23 | # Ignore wrangler 24 | - dependency-name: "github.com/rancher/wrangler" 25 | - dependency-name: "github.com/rancher/wrangler/v3" 26 | # Ignore rancher apis 27 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 28 | # Ignore Google API version, due it is breaking integration 29 | - dependency-name: "github.com/Masterminds/semver/v3" 30 | commit-message: 31 | prefix: ":seedling:" 32 | target-branch: "main" 33 | # Go modules in release-v2.11 branch 34 | - package-ecosystem: "gomod" 35 | directory: "/" 36 | schedule: 37 | interval: "weekly" 38 | ignore: 39 | # Ignore controller-runtime as its upgraded manually. 40 | - dependency-name: "sigs.k8s.io/controller-runtime" 41 | # Ignore k8s and its transitives modules as they are upgraded manually 42 | # together with controller-runtime. 43 | - dependency-name: "k8s.io/*" 44 | # Ignore wrangler 45 | - dependency-name: "github.com/rancher/wrangler" 46 | - dependency-name: "github.com/rancher/wrangler/v3" 47 | # Ignore rancher apis 48 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 49 | # Ignore Google API version, due it is breaking integration 50 | - dependency-name: "google.golang.org/api" 51 | # Ignore Google API version, due it is breaking integration 52 | - dependency-name: "github.com/Masterminds/semver/v3" 53 | commit-message: 54 | prefix: ":seedling:" 55 | target-branch: "release-v2.11" 56 | # Go modules in release-v2.10 branch 57 | - package-ecosystem: "gomod" 58 | directory: "/" 59 | schedule: 60 | interval: "weekly" 61 | ignore: 62 | # Ignore controller-runtime as its upgraded manually. 63 | - dependency-name: "sigs.k8s.io/controller-runtime" 64 | # Ignore k8s and its transitives modules as they are upgraded manually 65 | # together with controller-runtime. 
66 | - dependency-name: "k8s.io/*" 67 | # Ignore wrangler 68 | - dependency-name: "github.com/rancher/wrangler" 69 | - dependency-name: "github.com/rancher/wrangler/v3" 70 | # Ignore rancher apis 71 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 72 | # Ignore Google API version, due it is breaking integration 73 | - dependency-name: "google.golang.org/api" 74 | # Ignore Google API version, due it is breaking integration 75 | - dependency-name: "github.com/Masterminds/semver/v3" 76 | # Ignore lasso in branches earlier than 2.11 as the build fails due to a package removal 77 | - dependency-name: "github.com/rancher/lasso" 78 | commit-message: 79 | prefix: ":seedling:" 80 | target-branch: "release-v2.10" 81 | # Go modules in release-v2.9 branch 82 | - package-ecosystem: "gomod" 83 | directory: "/" 84 | schedule: 85 | interval: "weekly" 86 | ignore: 87 | # Ignore controller-runtime as its upgraded manually. 88 | - dependency-name: "sigs.k8s.io/controller-runtime" 89 | # Ignore k8s and its transitives modules as they are upgraded manually 90 | # together with controller-runtime. 
91 | - dependency-name: "k8s.io/*" 92 | # Ignore wrangler 93 | - dependency-name: "github.com/rancher/wrangler" 94 | - dependency-name: "github.com/rancher/wrangler/v3" 95 | # Ignore rancher apis 96 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 97 | # Ignore Google API version, due it is breaking integration 98 | - dependency-name: "google.golang.org/api" 99 | # Ignore Google API version, due it is breaking integration 100 | - dependency-name: "github.com/Masterminds/semver/v3" 101 | # Ignore lasso in branches earlier than 2.11 as the build fails due to a package removal 102 | - dependency-name: "github.com/rancher/lasso" 103 | commit-message: 104 | prefix: ":seedling:" 105 | target-branch: "release-v2.9" 106 | # Go modules in release-v2.8 branch 107 | - package-ecosystem: "gomod" 108 | directory: "/" 109 | schedule: 110 | interval: "weekly" 111 | ignore: 112 | # Ignore controller-runtime as its upgraded manually. 113 | - dependency-name: "sigs.k8s.io/controller-runtime" 114 | # Ignore k8s and its transitives modules as they are upgraded manually 115 | # together with controller-runtime. 
116 | - dependency-name: "k8s.io/*" 117 | # Ignore wrangler 118 | - dependency-name: "github.com/rancher/wrangler" 119 | - dependency-name: "github.com/rancher/wrangler/v3" 120 | # Ignore rancher apis 121 | - dependency-name: "github.com/rancher/rancher/pkg/apis" 122 | # Ignore Google API version, due it is breaking integration 123 | - dependency-name: "google.golang.org/api" 124 | # Ignore lasso in branches earlier than 2.11 as the build fails due to a package removal 125 | - dependency-name: "github.com/rancher/lasso" 126 | commit-message: 127 | prefix: ":seedling:" 128 | target-branch: "release-v2.8" 129 | -------------------------------------------------------------------------------- /.github/scripts/update-rancher-charts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Submit new operator version against rancher/charts 4 | 5 | set -ue 6 | 7 | PREV_OPERATOR_VERSION="$1" # e.g. 1.1.0-rc3 8 | NEW_OPERATOR_VERSION="$2" 9 | PREV_CHART_VERSION="$3" # e.g. 101.2.0 10 | NEW_CHART_VERSION="$4" 11 | REPLACE="$5" # remove previous version if `true`, otherwise add new 12 | 13 | OPERATOR="${OPERATOR:-gke-operator}" 14 | 15 | if [ -z "${GITHUB_WORKSPACE:-}" ]; then 16 | CHARTS_DIR="$(dirname -- "$0")/../../../charts" 17 | else 18 | CHARTS_DIR="${GITHUB_WORKSPACE}/charts" 19 | fi 20 | 21 | pushd "${CHARTS_DIR}" > /dev/null 22 | 23 | if [ ! -e ~/.gitconfig ]; then 24 | git config --global user.name "highlander-ci-bot" 25 | git config --global user.email highlander-ci@proton.me 26 | fi 27 | 28 | if [ ! 
-f bin/charts-build-scripts ]; then 29 | make pull-scripts 30 | fi 31 | 32 | find ./packages/rancher-${OPERATOR}/ -type f -exec sed -i -e "s/${PREV_OPERATOR_VERSION}/${NEW_OPERATOR_VERSION}/g" {} \; 33 | find ./packages/rancher-${OPERATOR}/ -type f -exec sed -i -e "s/version: ${PREV_CHART_VERSION}/version: ${NEW_CHART_VERSION}/g" {} \; 34 | find ./packages/rancher-${OPERATOR}/ -type f -exec sed -i -e "/doNotRelease: false/d" {} \; 35 | 36 | if [ "${REPLACE}" == "true" ] && grep -q "rancher-${OPERATOR}:" release.yaml; then 37 | # replace previous with new version 38 | sed -i -e "s/${PREV_CHART_VERSION}+up${PREV_OPERATOR_VERSION}/${NEW_CHART_VERSION}+up${NEW_OPERATOR_VERSION}/g" release.yaml 39 | else 40 | if grep -q "rancher-${OPERATOR}:" release.yaml; then 41 | # append new version below previous version 42 | sed -i -e "s/${PREV_CHART_VERSION}+up${PREV_OPERATOR_VERSION}/${PREV_CHART_VERSION}+up${PREV_OPERATOR_VERSION}\n - ${NEW_CHART_VERSION}+up${NEW_OPERATOR_VERSION}/g" release.yaml 43 | else 44 | # add new version to release.yaml 45 | cat <<< " 46 | rancher-${OPERATOR}: 47 | - ${PREV_CHART_VERSION}+up${PREV_OPERATOR_VERSION} 48 | - ${NEW_CHART_VERSION}+up${NEW_OPERATOR_VERSION} 49 | rancher-${OPERATOR}-crd: 50 | - ${PREV_CHART_VERSION}+up${PREV_OPERATOR_VERSION} 51 | - ${NEW_CHART_VERSION}+up${NEW_OPERATOR_VERSION}" >> release.yaml 52 | # remove empty line above rancher-${OPERATOR} 53 | sed -i -z -e "s/[[:space:]]*\nrancher-${OPERATOR}:/\nrancher-${OPERATOR}:/g" release.yaml 54 | fi 55 | fi 56 | 57 | git add packages/rancher-${OPERATOR} release.yaml 58 | git commit -m "Updating to Operator v${NEW_OPERATOR_VERSION}" 59 | 60 | if [ "${REPLACE}" == "true" ]; then 61 | for i in rancher-${OPERATOR} rancher-${OPERATOR}-crd; do CHART=$i VERSION=${PREV_CHART_VERSION}+up${PREV_OPERATOR_VERSION} make remove; done 62 | fi 63 | 64 | PACKAGE=rancher-${OPERATOR} make charts 65 | git add assets/rancher-${OPERATOR}* charts/rancher-${OPERATOR}* index.yaml 66 | git commit -m 
"Autogenerated changes for Operator v${NEW_OPERATOR_VERSION}" 67 | 68 | popd > /dev/null 69 | -------------------------------------------------------------------------------- /.github/scripts/update-rancher-dep.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Submit new operator version against rancher/rancher 4 | 5 | set -ue 6 | 7 | NEW_OPERATOR_VERSION="$1" # e.g. 1.1.0-rc2 8 | OPERATOR="${OPERATOR:-gke-operator}" 9 | 10 | if [ -z "${GITHUB_WORKSPACE:-}" ]; then 11 | RANCHER_DIR="$(dirname -- "$0")/../../../rancher" 12 | else 13 | RANCHER_DIR="${GITHUB_WORKSPACE}/rancher" 14 | fi 15 | 16 | 17 | if [ ! -e ~/.gitconfig ]; then 18 | git config --global user.name "highlander-ci-bot" 19 | git config --global user.email "highlander-ci@proton.me" 20 | fi 21 | 22 | cd "${RANCHER_DIR}" 23 | go get "github.com/rancher/${OPERATOR}@v${NEW_OPERATOR_VERSION}" 24 | go mod tidy 25 | cd pkg/apis 26 | go get "github.com/rancher/${OPERATOR}@v${NEW_OPERATOR_VERSION}" 27 | go mod tidy 28 | cd ../../ 29 | git add go.* pkg/apis/go.* 30 | 31 | git commit -m "Updating ${OPERATOR} to operator v${NEW_OPERATOR_VERSION}" 32 | 33 | -------------------------------------------------------------------------------- /.github/workflows/apidiff.yaml: -------------------------------------------------------------------------------- 1 | name: Go API Diff 2 | on: 3 | pull_request: 4 | jobs: 5 | go-apidiff: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - uses: actions/checkout@v4 9 | with: 10 | fetch-depth: 0 11 | - uses: actions/setup-go@v5 12 | with: 13 | go-version: 1.23.x 14 | - name: Generate API diff 15 | run: make apidiff 16 | -------------------------------------------------------------------------------- /.github/workflows/build.yaml: -------------------------------------------------------------------------------- 1 | name: Build 2 | on: 3 | pull_request: 4 | push: 5 | branches: 6 | - main 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 
10 | steps: 11 | - name: Checkout code 12 | uses: actions/checkout@v4 13 | - name: Build 14 | run: make image-build 15 | -------------------------------------------------------------------------------- /.github/workflows/delete-old-versions.yaml: -------------------------------------------------------------------------------- 1 | name: Delete Old Images and Charts 2 | on: 3 | schedule: 4 | - cron: '0 1 * * 1,4' # Every Mondays and Thursdays at 01:00 UTC 5 | workflow_dispatch: 6 | 7 | jobs: 8 | delete_old_packages: 9 | runs-on: ubuntu-latest 10 | permissions: 11 | packages: write 12 | steps: 13 | - name: Delete old gke-operator images 14 | uses: actions/delete-package-versions@v4 15 | with: 16 | package-name: gke-operator 17 | package-type: container 18 | min-versions-to-keep: 30 19 | token: ${{ secrets.GITHUB_TOKEN }} 20 | owner: rancher 21 | 22 | - name: Delete old rancher-gke-operator charts 23 | uses: actions/delete-package-versions@v4 24 | with: 25 | package-name: rancher-gke-operator-chart/rancher-gke-operator 26 | package-type: container 27 | min-versions-to-keep: 7 28 | token: ${{ secrets.GITHUB_TOKEN }} 29 | owner: rancher 30 | 31 | - name: Delete old rancher-gke-operator-crd charts 32 | uses: actions/delete-package-versions@v4 33 | with: 34 | package-name: rancher-gke-operator-crd-chart/rancher-gke-operator-crd 35 | package-type: container 36 | min-versions-to-keep: 7 37 | token: ${{ secrets.GITHUB_TOKEN }} 38 | owner: rancher 39 | -------------------------------------------------------------------------------- /.github/workflows/e2e-branch.yaml: -------------------------------------------------------------------------------- 1 | name: E2E test branch 2 | on: 3 | workflow_call: 4 | secrets: 5 | GKE_CREDENTIALS: 6 | description: "GKE credentials" 7 | GKE_PROJECT_ID: 8 | description: "GKE project ID" 9 | SLACK_WEBHOOK_URL: 10 | description: "WebHook URL to use for Slack" 11 | required: true 12 | inputs: 13 | branch: 14 | type: string 15 | default: 
"release-v2.9" 16 | 17 | env: 18 | GKE_CREDENTIALS: ${{ secrets.GKE_CREDENTIALS }} 19 | GKE_PROJECT_ID: ${{ secrets.GKE_PROJECT_ID }} 20 | 21 | jobs: 22 | e2e-tests: 23 | env: 24 | BRANCH: ${{ inputs.branch }} 25 | runs-on: ubuntu-latest 26 | steps: 27 | - name: Checkout code 28 | uses: actions/checkout@v4 29 | with: 30 | ref: | 31 | ${{ env.BRANCH }} 32 | - name: Login to GHCR registry 33 | uses: docker/login-action@v3 34 | with: 35 | registry: ghcr.io 36 | username: ${{ github.actor }} 37 | password: ${{ secrets.GITHUB_TOKEN }} 38 | - name: Setup Docker Buildx 39 | uses: docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 40 | - name: Build and push image 41 | env: 42 | REPO: ghcr.io/rancher 43 | run: | 44 | make image-push 45 | - name: Install Go 46 | uses: actions/setup-go@v5 47 | with: 48 | go-version: 1.23.x 49 | - uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 50 | with: 51 | version: v0.23.0 52 | install_only: true 53 | - name: Create kind cluster 54 | run: make setup-kind 55 | - name: Set the value 56 | run: | 57 | GKE_PROJECT_ID="${{ env.GKE_PROJECT_ID }}" 58 | echo "GKE_PROJECT_ID=${GKE_PROJECT_ID}" >> $GITHUB_ENV 59 | - name: E2E tests 60 | env: 61 | GKE_CREDENTIALS: "${{ secrets.GKE_CREDENTIALS }}" 62 | REPO: ghcr.io/rancher 63 | run: make e2e-tests 64 | - name: Archive artifacts 65 | if: always() 66 | uses: actions/upload-artifact@v4.6.2 67 | with: 68 | name: ci-artifacts-${{ env.BRANCH }} 69 | path: _artifacts 70 | if-no-files-found: ignore 71 | - name: Send failed status to slack 72 | if: failure() && github.event_name == 'schedule' 73 | uses: slackapi/slack-github-action@v2.0.0 74 | with: 75 | payload: | 76 | { 77 | "blocks": [ 78 | { 79 | "type": "section", 80 | "text": { 81 | "type": "mrkdwn", 82 | "text": "AKS Operator E2E test run failed." 
83 | }, 84 | "accessory": { 85 | "type": "button", 86 | "text": { 87 | "type": "plain_text", 88 | "text": ":github:", 89 | "emoji": true 90 | }, 91 | "url": "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" 92 | } 93 | } 94 | ] 95 | } 96 | env: 97 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 98 | SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK 99 | -------------------------------------------------------------------------------- /.github/workflows/e2e-workflow.yaml: -------------------------------------------------------------------------------- 1 | name: Run E2E test workflow 2 | on: 3 | workflow_dispatch: 4 | schedule: 5 | - cron: 0 22 * * * 6 | permissions: 7 | contents: read 8 | packages: write # Required for pushing images to ghcr.io 9 | jobs: 10 | e2e-test-main: 11 | uses: ./.github/workflows/e2e-branch.yaml 12 | with: 13 | branch: main 14 | secrets: 15 | GKE_CREDENTIALS: ${{ secrets.GKE_CREDENTIALS }} 16 | GKE_PROJECT_ID: ${{ secrets.GKE_PROJECT_ID }} 17 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 18 | e2e-test-v2_11: 19 | if: ${{ always() }} 20 | needs: e2e-test-main 21 | uses: ./.github/workflows/e2e-branch.yaml 22 | with: 23 | branch: release-v2.11 24 | secrets: 25 | GKE_CREDENTIALS: ${{ secrets.GKE_CREDENTIALS }} 26 | GKE_PROJECT_ID: ${{ secrets.GKE_PROJECT_ID }} 27 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 28 | e2e-test-v2_10: 29 | if: ${{ always() }} 30 | needs: e2e-test-v2_11 31 | uses: ./.github/workflows/e2e-branch.yaml 32 | with: 33 | branch: release-v2.10 34 | secrets: 35 | GKE_CREDENTIALS: ${{ secrets.GKE_CREDENTIALS }} 36 | GKE_PROJECT_ID: ${{ secrets.GKE_PROJECT_ID }} 37 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 38 | e2e-test-v2_9: 39 | if: ${{ always() }} 40 | needs: e2e-test-v2_10 41 | uses: ./.github/workflows/e2e-branch.yaml 42 | with: 43 | branch: release-v2.9 44 | secrets: 45 | GKE_CREDENTIALS: ${{ secrets.GKE_CREDENTIALS }} 46 | GKE_PROJECT_ID: ${{ secrets.GKE_PROJECT_ID 
}} 47 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 48 | e2e-test-v2_8: 49 | if: ${{ always() }} 50 | needs: e2e-test-v2_9 51 | uses: ./.github/workflows/e2e-branch.yaml 52 | with: 53 | branch: release-v2.8 54 | secrets: 55 | GKE_CREDENTIALS: ${{ secrets.GKE_CREDENTIALS }} 56 | GKE_PROJECT_ID: ${{ secrets.GKE_PROJECT_ID }} 57 | SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} 58 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: 3 | pull_request: 4 | push: 5 | branches: [ "main", "release-v*" ] 6 | tags: 7 | - 'v*' 8 | jobs: 9 | lint: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout code 13 | uses: actions/checkout@v4 14 | - name: Install Go 15 | uses: actions/setup-go@v5 16 | with: 17 | go-version: 1.23.x 18 | - name: Analysis 19 | uses: golangci/golangci-lint-action@v6 20 | with: 21 | args: -v 22 | -------------------------------------------------------------------------------- /.github/workflows/nightly-publish.yaml: -------------------------------------------------------------------------------- 1 | name: Nightly 2 | on: 3 | schedule: 4 | - cron: '0 1 * * *' 5 | workflow_dispatch: 6 | jobs: 7 | publish_nightly: 8 | uses: rancher-sandbox/highlander-reusable-workflows/.github/workflows/operator-with-latest-rancher-build.yaml@main 9 | with: 10 | operator_name: gke-operator 11 | operator_commit: ${{ github.sha }} 12 | 13 | publish_images: 14 | permissions: 15 | packages: write # Required for pushing images to ghcr.io 16 | runs-on: ubuntu-latest 17 | steps: 18 | - name: Checkout code 19 | uses: actions/checkout@v4 20 | - name: Login to GHCR registry 21 | uses: docker/login-action@v3 22 | with: 23 | registry: ghcr.io 24 | username: ${{ github.actor }} 25 | password: ${{ secrets.GITHUB_TOKEN }} 26 | - name: Setup Docker Buildx 27 | uses: 
docker/setup-buildx-action@b5ca514318bd6ebac0fb2aedd5d36ec1b5c232a2 # v3.10.0 28 | - name: Set image tag 29 | run: echo "TAG=v0.0.0-$(date +'%Y%m%d')" >> "$GITHUB_ENV" 30 | - name: Build and push image 31 | env: 32 | REPO: ghcr.io/rancher 33 | run: | 34 | make image-push 35 | 36 | publish_charts: 37 | permissions: 38 | packages: write # Required for pushing charts to ghcr.io 39 | runs-on: ubuntu-latest 40 | needs: publish_images 41 | steps: 42 | - name: Checkout code 43 | uses: actions/checkout@v4 44 | - name: Login to GHCR registry 45 | uses: docker/login-action@v3 46 | with: 47 | registry: ghcr.io 48 | username: ${{ github.actor }} 49 | password: ${{ secrets.GITHUB_TOKEN }} 50 | - name: Install Helm 51 | uses: azure/setup-helm@v4 52 | with: 53 | version: 3.8.0 54 | - name: Set image tag and chart version 55 | run: | 56 | echo "TAG=v0.0.0-$(date +'%Y%m%d')" >> "$GITHUB_ENV" 57 | echo "CHART_VERSION=$(date +'%Y%m%d')" >> "$GITHUB_ENV" 58 | - name: Build charts 59 | env: 60 | REPO: ghcr.io/rancher # used in the Helm chart values.yaml 61 | run: | 62 | make charts 63 | - name: Push charts 64 | run: | 65 | helm push bin/rancher-gke-operator-$CHART_VERSION.tgz oci://ghcr.io/${{ github.repository_owner }}/rancher-gke-operator-chart 66 | helm push bin/rancher-gke-operator-crd-$CHART_VERSION.tgz oci://ghcr.io/${{ github.repository_owner }}/rancher-gke-operator-crd-chart -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | push: 5 | tags: 6 | - 'v*' 7 | 8 | # GitHub settings / example values: 9 | # 10 | # org level vars: 11 | # - PUBLIC_REGISTRY: docker.io 12 | # repo level vars: 13 | # - PUBLIC_REGISTRY_REPO: rancher 14 | # repo level secrets: 15 | # - PUBLIC_REGISTRY_USERNAME 16 | # - PUBLIC_REGISTRY_PASSWORD 17 | 18 | jobs: 19 | publish-images: 20 | permissions: 21 | contents: read 22 | id-token: 
write # required for reading vault secrets and for cosign's use in ecm-distro-tools/publish-image 23 | strategy: 24 | matrix: 25 | include: 26 | # Three images are created: 27 | # - Multi-arch manifest for both amd64 and arm64 28 | - tag-suffix: "" 29 | platforms: linux/amd64,linux/arm64 30 | # - arm64 manifest 31 | - tag-suffix: "-arm64" 32 | platforms: linux/arm64 33 | # - amd64 manifest 34 | - tag-suffix: "-amd64" 35 | platforms: linux/amd64 36 | runs-on: ubuntu-latest 37 | steps: 38 | - name: Checkout code 39 | uses: actions/checkout@v4 40 | with: 41 | fetch-depth: 0 42 | ref: ${{ github.ref_name}} 43 | - name: Read secrets 44 | uses: rancher-eio/read-vault-secrets@main 45 | with: 46 | secrets: | 47 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials username | PUBLIC_REGISTRY_USERNAME ; 48 | secret/data/github/repo/${{ github.repository }}/dockerhub/${{ github.repository_owner }}/credentials password | PUBLIC_REGISTRY_PASSWORD ; 49 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials registry | PRIME_REGISTRY ; 50 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials username | PRIME_REGISTRY_USERNAME ; 51 | secret/data/github/repo/${{ github.repository }}/rancher-prime-registry/credentials password | PRIME_REGISTRY_PASSWORD 52 | - name: Publish images 53 | uses: rancher/ecm-distro-tools/actions/publish-image@master 54 | with: 55 | image: gke-operator 56 | tag: ${{ github.ref_name }}${{ matrix.tag-suffix }} 57 | platforms: ${{ matrix.platforms }} 58 | public-registry: docker.io 59 | public-repo: rancher 60 | public-username: ${{ env.PUBLIC_REGISTRY_USERNAME }} 61 | public-password: ${{ env.PUBLIC_REGISTRY_PASSWORD }} 62 | prime-registry: ${{ env.PRIME_REGISTRY }} 63 | prime-repo: rancher 64 | prime-username: ${{ env.PRIME_REGISTRY_USERNAME }} 65 | prime-password: ${{ env.PRIME_REGISTRY_PASSWORD }} 66 | make-target: image-push 67 | push-to-prime: 
true 68 | - name: Cleanup checksum files # in order to avoid goreleaser dirty state error, remove once rancher/ecm-distro-tools/actions/publish-image@main gets updated 69 | run: rm -f slsactl_*_checksums.txt* 70 | 71 | release: 72 | permissions: 73 | contents: write # required for creating GH release 74 | runs-on: ubuntu-latest 75 | needs: publish-images 76 | steps: 77 | - name: Checkout code 78 | uses: actions/checkout@v4 79 | with: 80 | fetch-depth: 0 81 | ref: ${{ github.ref_name }} 82 | - name: Create release 83 | env: 84 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required for creating GH release 85 | GORELEASER_CURRENT_TAG: ${{ github.ref_name }} # specify the tag to be released 86 | id: goreleaser 87 | uses: goreleaser/goreleaser-action@v6 88 | with: 89 | distribution: goreleaser 90 | version: "~> v2" 91 | args: release --clean --verbose 92 | - name: Upload charts to release 93 | env: 94 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} # required for updating GH release 95 | REPO: rancher # First name component for Docker repository to reference in `values.yaml` of the Helm chart release, this is expected to be `rancher`, image name is appended to this value 96 | TAG: ${{ github.ref_name }} # image tag to be referenced in `values.yaml` of the Helm chart release 97 | run: | 98 | version=$(echo '${{ steps.goreleaser.outputs.metadata }}' | jq -r '.version') 99 | echo "Publishing helm charts (version: $version)" 100 | 101 | # Both version and appVersion are set to the same value in the Chart.yaml (excluding the 'v' prefix) 102 | CHART_VERSION=$version GIT_TAG=$version make charts 103 | 104 | for f in $(find bin/ -name '*.tgz'); do 105 | echo "Uploading $f to GitHub release $TAG" 106 | gh release upload $TAG $f 107 | done 108 | echo "Charts successfully uploaded to GitHub release $TAG" 109 | 110 | dispatch-dependency: 111 | permissions: 112 | contents: read 113 | id-token: write 114 | actions: write 115 | runs-on: ubuntu-latest 116 | timeout-minutes: 10 117 | needs: 
publish-images 118 | if: github.event_name == 'push' && github.ref_type == 'tag' 119 | steps: 120 | - name: Read App Secrets 121 | uses: rancher-eio/read-vault-secrets@main 122 | with: 123 | secrets: | 124 | secret/data/github/repo/${{ github.repository }}/github/workflow-dispatcher/app-credentials appId | APP_ID ; 125 | secret/data/github/repo/${{ github.repository }}/github/workflow-dispatcher/app-credentials privateKey | PRIVATE_KEY 126 | 127 | - name: Create App Token 128 | uses: actions/create-github-app-token@v1 129 | id: app-token 130 | with: 131 | app-id: ${{ env.APP_ID }} 132 | private-key: ${{ env.PRIVATE_KEY }} 133 | owner: ${{ github.repository_owner }} 134 | 135 | - name: Run dispatch 136 | env: 137 | GH_TOKEN: ${{ steps.app-token.outputs.token }} 138 | run: | 139 | case ${{ github.ref_name }} in 140 | "v1.12"*) 141 | ACTION_TARGET_BRANCH="main" 142 | ;; 143 | "v1.11"*) 144 | ACTION_TARGET_BRANCH="release/v2.11" 145 | ;; 146 | "v1.10"*) 147 | ACTION_TARGET_BRANCH="release/v2.10" 148 | ;; 149 | "v1.9"*) 150 | ACTION_TARGET_BRANCH="release/v2.9" 151 | ;; 152 | "v1.3"*) 153 | ACTION_TARGET_BRANCH="release/v2.8" 154 | ;; 155 | *) 156 | echo "Not a valid tag, not dispatching event" 157 | exit 0 158 | esac 159 | echo "Running Go get on $ACTION_TARGET_BRANCH" 160 | gh workflow run "Go get" --repo rancher/rancher --ref $ACTION_TARGET_BRANCH -F goget_module=github.com/rancher/gke-operator -F goget_version=${{ github.ref_name }} -F source_author=${{ github.actor }} 161 | -------------------------------------------------------------------------------- /.github/workflows/scan.yaml: -------------------------------------------------------------------------------- 1 | name: Scan 2 | on: 3 | workflow_dispatch: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | tags: 11 | - "v*" 12 | jobs: 13 | scan: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout code 17 | uses: actions/checkout@v4 18 | with: 19 | fetch-depth: 0 20 | - 
name: Set up Go 21 | uses: actions/setup-go@v5 22 | with: 23 | go-version-file: 'go.mod' 24 | check-latest: true 25 | - name: Build operator 26 | run: make operator 27 | - name: Set up Docker Buildx 28 | id: buildx 29 | uses: docker/setup-buildx-action@v3.10.0 30 | - name: Build image 31 | uses: docker/build-push-action@v6.16.0 32 | with: 33 | context: . 34 | tags: ghcr.io/rancher/gke-operator:${{ github.sha }} 35 | load: true 36 | push: false 37 | file: package/Dockerfile 38 | - name: Run Trivy vulnerability scanner 39 | uses: aquasecurity/trivy-action@0.30.0 40 | env: 41 | TRIVY_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-db,public.ecr.aws/aquasecurity/trivy-db 42 | TRIVY_JAVA_DB_REPOSITORY: ghcr.io/aquasecurity/trivy-java-db,public.ecr.aws/aquasecurity/trivy-java-db 43 | with: 44 | image-ref: "ghcr.io/rancher/gke-operator:${{ github.sha }}" 45 | format: "table" 46 | exit-code: "1" 47 | ignore-unfixed: true 48 | severity: "CRITICAL,HIGH" 49 | -------------------------------------------------------------------------------- /.github/workflows/unit.yaml: -------------------------------------------------------------------------------- 1 | name: Unit tests 2 | on: 3 | pull_request: 4 | push: 5 | branches: [ "main", "release-v*" ] 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - name: Install Go 12 | uses: actions/setup-go@v5 13 | with: 14 | go-version: 1.23.x 15 | - name: Run tests 16 | run: | 17 | make test 18 | -------------------------------------------------------------------------------- /.github/workflows/verify.yaml: -------------------------------------------------------------------------------- 1 | name: Verify 2 | on: 3 | pull_request: 4 | push: 5 | branches: [ "main", "release-v*" ] 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - name: Install Go 12 | uses: actions/setup-go@v5 13 | with: 14 | go-version: 1.23.x 15 | - name: Run make verify 16 | run: | 17 | make 
verify 18 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | .vscode/ 3 | bin/ 4 | dist/ 5 | vendor/ 6 | .dapper 7 | _artifacts/ 8 | .envrc -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | run: 2 | timeout: 5m 3 | go: "1.23" 4 | tests: false 5 | allow-parallel-runners: true 6 | 7 | output: 8 | formats: 9 | - format: github-actions 10 | 11 | linters: 12 | disable-all: true 13 | enable: 14 | - dupl # check duplicated code 15 | - goconst # check strings that can turn into constants 16 | - gofmt # check fmt 17 | - goimports # check imports 18 | - gosec # check for security problems 19 | - govet # check vet 20 | - importas # check consistent import aliasing 21 | - ineffassign # check ineffectual assignments 22 | - misspell # check for misspelled English words 23 | - nakedret # check naked returns in functions 24 | - prealloc # check preallocated slice declarations 25 | - revive # replacement for golint 26 | - unconvert # check redundant type conversions 27 | - whitespace # check for trailing whitespace and tabs 28 | linters-settings: 29 | revive: 30 | rules: 31 | # The following rules are recommended https://github.com/mgechev/revive#recommended-configuration 32 | - name: blank-imports 33 | - name: context-as-argument 34 | - name: context-keys-type 35 | - name: dot-imports 36 | - name: error-return 37 | - name: error-strings 38 | - name: error-naming 39 | - name: exported 40 | - name: increment-decrement 41 | - name: var-naming 42 | - name: var-declaration 43 | - name: package-comments 44 | - name: range 45 | - name: receiver-naming 46 | - name: time-naming 47 | - name: indent-error-flow 48 | - name: errorf 49 | - name: empty-block 50 | - name: superfluous-else 51 | - name: unused-parameter 52 | - 
name: unreachable-code 53 | - name: redefines-builtin-id 54 | importas: 55 | no-unaliased: true 56 | alias: 57 | # Kubernetes 58 | - pkg: k8s.io/api/core/v1 59 | alias: corev1 60 | - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 61 | alias: apiextensionsv1 62 | - pkg: k8s.io/api/apps/v1 63 | alias: appsv1 64 | - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 65 | alias: metav1 66 | - pkg: k8s.io/apimachinery/pkg/util/runtime 67 | alias: utilruntime 68 | - pkg: sigs.k8s.io/controller-runtime/pkg/client 69 | alias: runtimeclient 70 | - pkg: k8s.io/apimachinery/pkg/util/errors 71 | alias: kerrors 72 | - pkg: k8s.io/client-go/kubernetes/scheme 73 | alias: clientgoscheme 74 | # Rancher GKE operator 75 | - pkg: github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1 76 | alias: gkev1 77 | - pkg: github.com/rancher/gke-operator/pkg/generated/controllers/gke.cattle.io/v1 78 | alias: gkecontrollers 79 | # Golang API 80 | - pkg: google.golang.org/api/container/v1 81 | alias: gkeapi 82 | issues: 83 | exclude-rules: 84 | - linters: 85 | - revive 86 | text: "var-naming: don't use an underscore in package name" 87 | path: 'mock(\w+)/doc.go$' 88 | exclude-dirs: 89 | - pkg/generated 90 | exclude-files: 91 | - "zz_generated_*" -------------------------------------------------------------------------------- /.goreleaser.yaml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | project_name: gke-operator 4 | 5 | before: 6 | hooks: 7 | - go mod tidy 8 | 9 | builds: 10 | - env: 11 | - CGO_ENABLED=0 12 | goos: 13 | - linux 14 | goarch: 15 | - amd64 16 | - arm64 17 | binary: gke-operator 18 | 19 | release: 20 | prerelease: auto 21 | 22 | changelog: 23 | sort: asc 24 | filters: 25 | exclude: 26 | - "^docs:" 27 | - "^test:" 28 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # These owners will 
be the default owners for everything in 2 | # the repo. Unless a later match takes precedence. 3 | 4 | * @rancher/highlander @rancher/infracloud-team 5 | -------------------------------------------------------------------------------- /License: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 5 | 1. Definitions. 6 | "License" shall mean the terms and conditions for use, reproduction, 7 | and distribution as defined by Sections 1 through 9 of this document. 8 | "Licensor" shall mean the copyright owner or entity authorized by 9 | the copyright owner that is granting the License. 10 | "Legal Entity" shall mean the union of the acting entity and all 11 | other entities that control, are controlled by, or are under common 12 | control with that entity. For the purposes of this definition, 13 | "control" means (i) the power, direct or indirect, to cause the 14 | direction or management of such entity, whether by contract or 15 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 16 | outstanding shares, or (iii) beneficial ownership of such entity. 17 | "You" (or "Your") shall mean an individual or Legal Entity 18 | exercising permissions granted by this License. 19 | "Source" form shall mean the preferred form for making modifications, 20 | including but not limited to software source code, documentation 21 | source, and configuration files. 22 | "Object" form shall mean any form resulting from mechanical 23 | transformation or translation of a Source form, including but 24 | not limited to compiled object code, generated documentation, 25 | and conversions to other media types. 
26 | "Work" shall mean the work of authorship, whether in Source or 27 | Object form, made available under the License, as indicated by a 28 | copyright notice that is included in or attached to the work 29 | (an example is provided in the Appendix below). 30 | "Derivative Works" shall mean any work, whether in Source or Object 31 | form, that is based on (or derived from) the Work and for which the 32 | editorial revisions, annotations, elaborations, or other modifications 33 | represent, as a whole, an original work of authorship. For the purposes 34 | of this License, Derivative Works shall not include works that remain 35 | separable from, or merely link (or bind by name) to the interfaces of, 36 | the Work and Derivative Works thereof. 37 | "Contribution" shall mean any work of authorship, including 38 | the original version of the Work and any modifications or additions 39 | to that Work or Derivative Works thereof, that is intentionally 40 | submitted to Licensor for inclusion in the Work by the copyright owner 41 | or by an individual or Legal Entity authorized to submit on behalf of 42 | the copyright owner. For the purposes of this definition, "submitted" 43 | means any form of electronic, verbal, or written communication sent 44 | to the Licensor or its representatives, including but not limited to 45 | communication on electronic mailing lists, source code control systems, 46 | and issue tracking systems that are managed by, or on behalf of, the 47 | Licensor for the purpose of discussing and improving the Work, but 48 | excluding communication that is conspicuously marked or otherwise 49 | designated in writing by the copyright owner as "Not a Contribution." 50 | "Contributor" shall mean Licensor and any individual or Legal Entity 51 | on behalf of whom a Contribution has been received by Licensor and 52 | subsequently incorporated within the Work. 53 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 54 | this License, each Contributor hereby grants to You a perpetual, 55 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 56 | copyright license to reproduce, prepare Derivative Works of, 57 | publicly display, publicly perform, sublicense, and distribute the 58 | Work and such Derivative Works in Source or Object form. 59 | 3. Grant of Patent License. Subject to the terms and conditions of 60 | this License, each Contributor hereby grants to You a perpetual, 61 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 62 | (except as stated in this section) patent license to make, have made, 63 | use, offer to sell, sell, import, and otherwise transfer the Work, 64 | where such license applies only to those patent claims licensable 65 | by such Contributor that are necessarily infringed by their 66 | Contribution(s) alone or by combination of their Contribution(s) 67 | with the Work to which such Contribution(s) was submitted. If You 68 | institute patent litigation against any entity (including a 69 | cross-claim or counterclaim in a lawsuit) alleging that the Work 70 | or a Contribution incorporated within the Work constitutes direct 71 | or contributory patent infringement, then any patent licenses 72 | granted to You under this License for that Work shall terminate 73 | as of the date such litigation is filed. 74 | 4. Redistribution. 
You may reproduce and distribute copies of the 75 | Work or Derivative Works thereof in any medium, with or without 76 | modifications, and in Source or Object form, provided that You 77 | meet the following conditions: 78 | (a) You must give any other recipients of the Work or 79 | Derivative Works a copy of this License; and 80 | (b) You must cause any modified files to carry prominent notices 81 | stating that You changed the files; and 82 | (c) You must retain, in the Source form of any Derivative Works 83 | that You distribute, all copyright, patent, trademark, and 84 | attribution notices from the Source form of the Work, 85 | excluding those notices that do not pertain to any part of 86 | the Derivative Works; and 87 | (d) If the Work includes a "NOTICE" text file as part of its 88 | distribution, then any Derivative Works that You distribute must 89 | include a readable copy of the attribution notices contained 90 | within such NOTICE file, excluding those notices that do not 91 | pertain to any part of the Derivative Works, in at least one 92 | of the following places: within a NOTICE text file distributed 93 | as part of the Derivative Works; within the Source form or 94 | documentation, if provided along with the Derivative Works; or, 95 | within a display generated by the Derivative Works, if and 96 | wherever such third-party notices normally appear. The contents 97 | of the NOTICE file are for informational purposes only and 98 | do not modify the License. You may add Your own attribution 99 | notices within Derivative Works that You distribute, alongside 100 | or as an addendum to the NOTICE text from the Work, provided 101 | that such additional attribution notices cannot be construed 102 | as modifying the License. 
103 | You may add Your own copyright statement to Your modifications and 104 | may provide additional or different license terms and conditions 105 | for use, reproduction, or distribution of Your modifications, or 106 | for any such Derivative Works as a whole, provided Your use, 107 | reproduction, and distribution of the Work otherwise complies with 108 | the conditions stated in this License. 109 | 5. Submission of Contributions. Unless You explicitly state otherwise, 110 | any Contribution intentionally submitted for inclusion in the Work 111 | by You to the Licensor shall be under the terms and conditions of 112 | this License, without any additional terms or conditions. 113 | Notwithstanding the above, nothing herein shall supersede or modify 114 | the terms of any separate license agreement you may have executed 115 | with Licensor regarding such Contributions. 116 | 6. Trademarks. This License does not grant permission to use the trade 117 | names, trademarks, service marks, or product names of the Licensor, 118 | except as required for reasonable and customary use in describing the 119 | origin of the Work and reproducing the content of the NOTICE file. 120 | 7. Disclaimer of Warranty. Unless required by applicable law or 121 | agreed to in writing, Licensor provides the Work (and each 122 | Contributor provides its Contributions) on an "AS IS" BASIS, 123 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 124 | implied, including, without limitation, any warranties or conditions 125 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 126 | PARTICULAR PURPOSE. You are solely responsible for determining the 127 | appropriateness of using or redistributing the Work and assume any 128 | risks associated with Your exercise of permissions under this License. 129 | 8. Limitation of Liability. 
In no event and under no legal theory, 130 | whether in tort (including negligence), contract, or otherwise, 131 | unless required by applicable law (such as deliberate and grossly 132 | negligent acts) or agreed to in writing, shall any Contributor be 133 | liable to You for damages, including any direct, indirect, special, 134 | incidental, or consequential damages of any character arising as a 135 | result of this License or out of the use or inability to use the 136 | Work (including but not limited to damages for loss of goodwill, 137 | work stoppage, computer failure or malfunction, or any and all 138 | other commercial damages or losses), even if such Contributor 139 | has been advised of the possibility of such damages. 140 | 9. Accepting Warranty or Additional Liability. While redistributing 141 | the Work or Derivative Works thereof, You may choose to offer, 142 | and charge a fee for, acceptance of support, warranty, indemnity, 143 | or other liability obligations and/or rights consistent with this 144 | License. However, in accepting such obligations, You may act only 145 | on Your own behalf and on Your sole responsibility, not on behalf 146 | of any other Contributor, and only if You agree to indemnify, 147 | defend, and hold each Contributor harmless for any liability 148 | incurred by, or claims asserted against, such Contributor by reason 149 | of your accepting any such warranty or additional liability. 
150 | END OF TERMS AND CONDITIONS -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TARGETS := $(shell ls scripts) 2 | GIT_BRANCH?=$(shell git branch --show-current) 3 | GIT_COMMIT?=$(shell git rev-parse HEAD) 4 | GIT_COMMIT_SHORT?=$(shell git rev-parse --short HEAD) 5 | GIT_TAG?=v0.0.0 6 | ifneq ($(GIT_BRANCH), main) 7 | GIT_TAG?=$(shell git describe --abbrev=0 --tags 2>/dev/null || echo "v0.0.0" ) 8 | endif 9 | TAG?=${GIT_TAG}-${GIT_COMMIT_SHORT} 10 | REPO?=docker.io/rancher 11 | IMAGE = $(REPO)/gke-operator:$(TAG) 12 | MACHINE := rancher 13 | # Define the target platforms that can be used across the ecosystem. 14 | # Note that what would actually be used for a given project will be 15 | # defined in TARGET_PLATFORMS, and must be a subset of the below: 16 | DEFAULT_PLATFORMS := linux/amd64,linux/arm64,darwin/arm64,darwin/amd64 17 | TARGET_PLATFORMS := linux/amd64,linux/arm64 18 | BUILDX_ARGS ?= --sbom=true --attest type=provenance,mode=max 19 | 20 | OPERATOR_CHART?=$(shell find $(ROOT_DIR) -type f -name "rancher-gke-operator-[0-9]*.tgz" -print) 21 | CRD_CHART?=$(shell find $(ROOT_DIR) -type f -name "rancher-gke-operator-crd*.tgz" -print) 22 | CHART_VERSION?=900 # Only used in e2e to avoid downgrades from rancher 23 | CLUSTER_NAME?="gke-operator-e2e" 24 | E2E_CONF_FILE ?= $(ROOT_DIR)/test/e2e/config/config.yaml 25 | 26 | ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST)))) 27 | BIN_DIR := $(abspath $(ROOT_DIR)/bin) 28 | GO_INSTALL = ./scripts/go_install.sh 29 | 30 | MOCKGEN_VER := v1.6.0 31 | MOCKGEN_BIN := mockgen 32 | MOCKGEN := $(BIN_DIR)/$(MOCKGEN_BIN)-$(MOCKGEN_VER) 33 | 34 | GINKGO_VER := v2.22.2 35 | GINKGO_BIN := ginkgo 36 | GINKGO := $(BIN_DIR)/$(GINKGO_BIN)-$(GINKGO_VER) 37 | 38 | GO_APIDIFF_VER := v0.8.2 39 | GO_APIDIFF_BIN := go-apidiff 40 | GO_APIDIFF := $(BIN_DIR)/$(GO_APIDIFF_BIN)-$(GO_APIDIFF_VER) 41 | 42 | 
SETUP_ENVTEST_VER := v0.0.0-20211110210527-619e6b92dab9 43 | SETUP_ENVTEST_BIN := setup-envtest 44 | SETUP_ENVTEST := $(BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER) 45 | 46 | ifeq ($(shell go env GOOS),darwin) # Use the darwin/amd64 binary until an arm64 version is available 47 | KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path --arch amd64 $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION)) 48 | else 49 | KUBEBUILDER_ASSETS ?= $(shell $(SETUP_ENVTEST) use --use-env -p path $(KUBEBUILDER_ENVTEST_KUBERNETES_VERSION)) 50 | endif 51 | 52 | default: operator 53 | 54 | .PHONY: generate-go 55 | generate-go: $(MOCKGEN) 56 | go generate ./pkg/gke/... 57 | 58 | .PHONY: generate-crd 59 | generate-crd: $(MOCKGEN) 60 | go generate main.go 61 | 62 | .PHONY: generate 63 | generate: 64 | $(MAKE) generate-go 65 | $(MAKE) generate-crd 66 | 67 | .PHONY: clean 68 | clean: 69 | rm -rf build bin dist 70 | 71 | $(MOCKGEN): 72 | GOBIN=$(BIN_DIR) $(GO_INSTALL) github.com/golang/mock/mockgen $(MOCKGEN_BIN) $(MOCKGEN_VER) 73 | 74 | $(GINKGO): 75 | GOBIN=$(BIN_DIR) $(GO_INSTALL) github.com/onsi/ginkgo/v2/ginkgo $(GINKGO_BIN) $(GINKGO_VER) 76 | 77 | $(GO_APIDIFF): 78 | GOBIN=$(BIN_DIR) $(GO_INSTALL) github.com/joelanford/go-apidiff $(GO_APIDIFF_BIN) $(GO_APIDIFF_VER) 79 | 80 | $(SETUP_ENVTEST): 81 | GOBIN=$(BIN_DIR) $(GO_INSTALL) sigs.k8s.io/controller-runtime/tools/setup-envtest $(SETUP_ENVTEST_BIN) $(SETUP_ENVTEST_VER) 82 | 83 | .PHONY: operator 84 | operator: 85 | CGO_ENABLED=0 go build -ldflags \ 86 | "-X github.com/rancher/gke-operator/pkg/version.GitCommit=$(GIT_COMMIT) \ 87 | -X github.com/rancher/gke-operator/pkg/version.Version=$(TAG)" \ 88 | -o bin/gke-operator . 89 | 90 | .PHONY: test 91 | test: $(SETUP_ENVTEST) $(GINKGO) 92 | KUBEBUILDER_ASSETS="$(KUBEBUILDER_ASSETS)" $(GINKGO) -v -r -p --trace ./pkg/... ./controller/... 
93 | 94 | ALL_VERIFY_CHECKS = generate 95 | 96 | .PHONY: verify 97 | verify: $(addprefix verify-,$(ALL_VERIFY_CHECKS)) 98 | 99 | .PHONY: verify-generate 100 | verify-generate: generate 101 | @if !(git diff --quiet HEAD); then \ 102 | git diff; \ 103 | echo "generated files are out of date, run make generate"; exit 1; \ 104 | fi 105 | 106 | .PHONY: operator-chart 107 | operator-chart: 108 | mkdir -p $(BIN_DIR) 109 | cp -rf $(ROOT_DIR)/charts/gke-operator $(BIN_DIR)/chart 110 | sed -i -e 's/tag:.*/tag: '${TAG}'/' $(BIN_DIR)/chart/values.yaml 111 | sed -i -e 's|repository:.*|repository: '${REPO}/gke-operator'|' $(BIN_DIR)/chart/values.yaml 112 | helm package --version ${CHART_VERSION} --app-version ${GIT_TAG} -d $(BIN_DIR)/ $(BIN_DIR)/chart 113 | rm -Rf $(BIN_DIR)/chart 114 | 115 | .PHONY: crd-chart 116 | crd-chart: 117 | mkdir -p $(BIN_DIR) 118 | helm package --version ${CHART_VERSION} --app-version ${GIT_TAG} -d $(BIN_DIR)/ $(ROOT_DIR)/charts/gke-operator-crd 119 | rm -Rf $(BIN_DIR)/chart 120 | 121 | .PHONY: charts 122 | charts: 123 | $(MAKE) operator-chart 124 | $(MAKE) crd-chart 125 | 126 | buildx-machine: ## create rancher dockerbuildx machine targeting platform defined by DEFAULT_PLATFORMS 127 | @docker buildx ls | grep $(MACHINE) || \ 128 | docker buildx create --name=$(MACHINE) --platform=$(DEFAULT_PLATFORMS) 129 | 130 | .PHONY: image-build 131 | image-build: buildx-machine ## build (and load) the container image targeting the current platform. 132 | docker buildx build -f package/Dockerfile \ 133 | --builder $(MACHINE) --build-arg COMMIT=$(GIT_COMMIT) --build-arg VERSION=$(TAG) \ 134 | -t "$(IMAGE)" $(BUILD_ACTION) . 135 | @echo "Built $(IMAGE)" 136 | 137 | .PHONY: image-push 138 | image-push: buildx-machine ## build the container image targeting all platforms defined by TARGET_PLATFORMS and push to a registry. 
139 | docker buildx build -f package/Dockerfile \ 140 | --builder $(MACHINE) $(IID_FILE_FLAG) $(BUILDX_ARGS) --build-arg COMMIT=$(GIT_COMMIT) --build-arg VERSION=$(TAG) \ 141 | --platform=$(TARGET_PLATFORMS) -t "$(IMAGE)" --push . 142 | @echo "Pushed $(IMAGE)" 143 | 144 | .PHONY: setup-kind 145 | setup-kind: 146 | CLUSTER_NAME=$(CLUSTER_NAME) $(ROOT_DIR)/scripts/setup-kind-cluster.sh 147 | 148 | .PHONY: e2e-tests 149 | e2e-tests: $(GINKGO) charts 150 | export EXTERNAL_IP=`kubectl get nodes -o jsonpath='{.items[].status.addresses[?(@.type == "InternalIP")].address}'` && \ 151 | export BRIDGE_IP="172.18.0.1" && \ 152 | export CONFIG_PATH=$(E2E_CONF_FILE) && \ 153 | export OPERATOR_CHART=$(OPERATOR_CHART) && \ 154 | export CRD_CHART=$(CRD_CHART) && \ 155 | cd $(ROOT_DIR)/test && $(GINKGO) $(ONLY_DEPLOY) -r -v ./e2e 156 | 157 | .PHONY: kind-e2e-tests 158 | kind-e2e-tests: docker-build-e2e setup-kind 159 | kind load docker-image --name $(CLUSTER_NAME) ${IMAGE} 160 | $(MAKE) e2e-tests 161 | 162 | kind-deploy-operator: 163 | ONLY_DEPLOY="--label-filter=\"do-nothing\"" $(MAKE) kind-e2e-tests 164 | 165 | .PHONY: docker-build-e2e 166 | docker-build-e2e: 167 | DOCKER_BUILDKIT=1 docker build \ 168 | -f test/e2e/Dockerfile.e2e \ 169 | --build-arg "TAG=${GIT_TAG}" \ 170 | --build-arg "COMMIT=${GIT_COMMIT}" \ 171 | --build-arg "COMMITDATE=${COMMITDATE}" \ 172 | -t ${IMAGE} . 
173 | 174 | .PHONY: delete-local-kind-cluster 175 | delete-local-kind-cluster: ## Delete the local kind cluster 176 | kind delete cluster --name=$(CLUSTER_NAME) 177 | 178 | APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main) 179 | 180 | .PHONY: apidiff 181 | apidiff: $(GO_APIDIFF) ## Check for API differences 182 | $(GO_APIDIFF) $(APIDIFF_OLD_COMMIT) --print-compatible 183 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Nightly e2e tests](https://github.com/rancher/gke-operator/actions/workflows/e2e-latest-rancher.yaml/badge.svg?branch=main)](https://github.com/rancher/gke-operator/actions/workflows/e2e-latest-rancher.yaml) 2 | 3 | # gke-operator 4 | 5 | The GKE operator is a controller for Kubernetes Custom Resource Definitions (CRDs) that manages cluster provisioning in Google Kubernetes Engine. It uses a GKEClusterConfig defined by a CRD. 6 | 7 | ## Build 8 | 9 | Operator binary can be built using the following command: 10 | 11 | ```sh 12 | make operator 13 | ``` 14 | 15 | ## Deploy operator from source 16 | 17 | You can use the following command to deploy a Kind cluster with Rancher manager and operator: 18 | 19 | ```sh 20 | make kind-deploy-operator 21 | ``` 22 | 23 | After this, you can also scale down operator deployment and run it from a local binary. 24 | 25 | ## Tests 26 | 27 | To run unit tests use the following command: 28 | 29 | ```sh 30 | make test 31 | ``` 32 | 33 | ## E2E 34 | 35 | We run e2e tests after every merged PR and periodically every 24 hours. They are triggered by a [Github action](https://github.com/rancher/gke-operator/blob/main/.github/workflows/e2e-latest-rancher.yaml) 36 | 37 | For running e2e tests: 38 | 39 | 1. 
Set `GKE_PROJECT_ID` and `GKE_CREDENTIALS` environment variables: 40 | 41 | ```sh 42 | export GKE_PROJECT_ID="replace-with-your-value" 43 | export GKE_CREDENTIALS=$( cat /path/to/gke-credentials.json ) 44 | ``` 45 | 46 | 2. And finally run: 47 | 48 | ```sh 49 | make kind-e2e-tests 50 | ``` 51 | 52 | This will set up a kind cluster locally, and the e2e tests will be run against it, where they will: 53 | 54 | * deploy rancher and cert-manager 55 | * deploy gke operator and operator CRD charts 56 | * create gke credentials secret 57 | * create a cluster in GKE 58 | * wait for cluster to be ready 59 | * clean up cluster 60 | 61 | 62 | Once e2e tests are completed, the local kind cluster can also be deleted by running: 63 | 64 | ```bash 65 | make delete-local-kind-cluster 66 | ``` 67 | 68 | ## Release 69 | 70 | ### When should I release? 71 | 72 | A KEv2 operator should be released if: 73 | 74 | * There have been several commits since the last release, 75 | * You need to pull in an update/bug fix/backend code to unblock UI for a feature enhancement in Rancher 76 | * The operator needs to be unRC for a Rancher release 77 | 78 | ### How do I release? 79 | 80 | Tag the latest commit on the `master` branch. For example, if latest tag is: 81 | * `v1.1.3-rc1` you should tag `v1.1.3-rc2`. 82 | * `v1.1.3` you should tag `v1.1.4-rc1`. 83 | 84 | ```bash 85 | # Get the latest upstream changes 86 | # Note: `upstream` must be the remote pointing to `git@github.com:rancher/gke-operator.git`. 87 | git pull upstream master --tags 88 | 89 | # Export the tag of the release to be cut, e.g.: 90 | export RELEASE_TAG=v1.1.3-rc2 91 | 92 | # Create tags locally 93 | git tag -s -a ${RELEASE_TAG} -m ${RELEASE_TAG} 94 | 95 | # Push tags 96 | # Note: `upstream` must be the remote pointing to `git@github.com:rancher/gke-operator.git`. 97 | git push upstream ${RELEASE_TAG} 98 | ``` 99 | 100 | After pushing the release tag, you need to run 2 Github Actions. 
You can find them in the Actions tab of the repo: 101 | 102 | * [Update GKE operator in rancher/rancher](https://github.com/rancher/gke-operator/actions/workflows/update-rancher-dep.yaml) - This action will update the GKE operator in rancher/rancher repo. It will bump go dependencies. 103 | * [Update GKE operator in rancher/charts](https://github.com/rancher/gke-operator/actions/workflows/update-rancher-charts.yaml) - This action will update the GKE operator in rancher/charts repo. It will bump the chart version. 104 | 105 | ### How do I unRC? 106 | 107 | unRC is the process of removing the rc from a KEv2 operator tag and means the released version is stable and ready for use. Release the KEv2 operator but instead of bumping the rc, remove the rc. For example, if the latest release of GKE operator is: 108 | * `v1.1.3-rc1`, release the next version without the rc which would be `v1.1.3`. 109 | * `v1.1.3`, that has no rc so release that version or `v1.1.4` if updates are available. 110 | -------------------------------------------------------------------------------- /charts/gke-operator-crd/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rancher-gke-operator-crd 3 | description: GKE Operator CustomResourceDefinitions 4 | version: 999 5 | appVersion: 999 6 | annotations: 7 | catalog.cattle.io/certified: rancher 8 | catalog.cattle.io/namespace: cattle-system 9 | catalog.cattle.io/hidden: "true" 10 | catalog.cattle.io/release-name: rancher-gke-operator-crd 11 | catalog.cattle.io/os: linux 12 | catalog.cattle.io/permits-os: linux,windows 13 | 14 | -------------------------------------------------------------------------------- /charts/gke-operator-crd/templates/crds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | helm.sh/resource-policy: keep 6 | 
name: gkeclusterconfigs.gke.cattle.io 7 | spec: 8 | group: gke.cattle.io 9 | names: 10 | kind: GKEClusterConfig 11 | plural: gkeclusterconfigs 12 | shortNames: 13 | - gkecc 14 | singular: gkeclusterconfig 15 | preserveUnknownFields: false 16 | scope: Namespaced 17 | versions: 18 | - name: v1 19 | schema: 20 | openAPIV3Schema: 21 | properties: 22 | spec: 23 | properties: 24 | autopilotConfig: 25 | nullable: true 26 | properties: 27 | enabled: 28 | type: boolean 29 | type: object 30 | clusterAddons: 31 | nullable: true 32 | properties: 33 | horizontalPodAutoscaling: 34 | type: boolean 35 | httpLoadBalancing: 36 | type: boolean 37 | networkPolicyConfig: 38 | type: boolean 39 | type: object 40 | clusterIpv4Cidr: 41 | nullable: true 42 | type: string 43 | clusterName: 44 | nullable: true 45 | type: string 46 | customerManagedEncryptionKey: 47 | nullable: true 48 | properties: 49 | keyName: 50 | nullable: true 51 | type: string 52 | ringName: 53 | nullable: true 54 | type: string 55 | type: object 56 | description: 57 | nullable: true 58 | type: string 59 | enableKubernetesAlpha: 60 | nullable: true 61 | type: boolean 62 | googleCredentialSecret: 63 | nullable: true 64 | type: string 65 | imported: 66 | type: boolean 67 | ipAllocationPolicy: 68 | nullable: true 69 | properties: 70 | clusterIpv4CidrBlock: 71 | nullable: true 72 | type: string 73 | clusterSecondaryRangeName: 74 | nullable: true 75 | type: string 76 | createSubnetwork: 77 | type: boolean 78 | nodeIpv4CidrBlock: 79 | nullable: true 80 | type: string 81 | servicesIpv4CidrBlock: 82 | nullable: true 83 | type: string 84 | servicesSecondaryRangeName: 85 | nullable: true 86 | type: string 87 | subnetworkName: 88 | nullable: true 89 | type: string 90 | useIpAliases: 91 | type: boolean 92 | type: object 93 | kubernetesVersion: 94 | nullable: true 95 | type: string 96 | labels: 97 | additionalProperties: 98 | nullable: true 99 | type: string 100 | nullable: true 101 | type: object 102 | locations: 103 | items: 104 | 
nullable: true 105 | type: string 106 | nullable: true 107 | type: array 108 | loggingService: 109 | nullable: true 110 | type: string 111 | maintenanceWindow: 112 | nullable: true 113 | type: string 114 | masterAuthorizedNetworks: 115 | nullable: true 116 | properties: 117 | cidrBlocks: 118 | items: 119 | properties: 120 | cidrBlock: 121 | nullable: true 122 | type: string 123 | displayName: 124 | nullable: true 125 | type: string 126 | type: object 127 | nullable: true 128 | type: array 129 | enabled: 130 | type: boolean 131 | type: object 132 | monitoringService: 133 | nullable: true 134 | type: string 135 | network: 136 | nullable: true 137 | type: string 138 | networkPolicyEnabled: 139 | nullable: true 140 | type: boolean 141 | nodePools: 142 | items: 143 | properties: 144 | autoscaling: 145 | nullable: true 146 | properties: 147 | enabled: 148 | type: boolean 149 | maxNodeCount: 150 | type: integer 151 | minNodeCount: 152 | type: integer 153 | type: object 154 | config: 155 | nullable: true 156 | properties: 157 | bootDiskKmsKey: 158 | nullable: true 159 | type: string 160 | diskSizeGb: 161 | type: integer 162 | diskType: 163 | nullable: true 164 | type: string 165 | imageType: 166 | nullable: true 167 | type: string 168 | labels: 169 | additionalProperties: 170 | nullable: true 171 | type: string 172 | nullable: true 173 | type: object 174 | localSsdCount: 175 | type: integer 176 | machineType: 177 | nullable: true 178 | type: string 179 | oauthScopes: 180 | items: 181 | nullable: true 182 | type: string 183 | nullable: true 184 | type: array 185 | preemptible: 186 | type: boolean 187 | serviceAccount: 188 | nullable: true 189 | type: string 190 | tags: 191 | items: 192 | nullable: true 193 | type: string 194 | nullable: true 195 | type: array 196 | taints: 197 | items: 198 | properties: 199 | effect: 200 | nullable: true 201 | type: string 202 | key: 203 | nullable: true 204 | type: string 205 | value: 206 | nullable: true 207 | type: string 208 | type: 
object 209 | nullable: true 210 | type: array 211 | type: object 212 | initialNodeCount: 213 | nullable: true 214 | type: integer 215 | management: 216 | nullable: true 217 | properties: 218 | autoRepair: 219 | type: boolean 220 | autoUpgrade: 221 | type: boolean 222 | type: object 223 | maxPodsConstraint: 224 | nullable: true 225 | type: integer 226 | name: 227 | nullable: true 228 | type: string 229 | version: 230 | nullable: true 231 | type: string 232 | type: object 233 | nullable: true 234 | type: array 235 | privateClusterConfig: 236 | nullable: true 237 | properties: 238 | enablePrivateEndpoint: 239 | type: boolean 240 | enablePrivateNodes: 241 | type: boolean 242 | masterIpv4CidrBlock: 243 | nullable: true 244 | type: string 245 | type: object 246 | projectID: 247 | nullable: true 248 | type: string 249 | region: 250 | nullable: true 251 | type: string 252 | subnetwork: 253 | nullable: true 254 | type: string 255 | zone: 256 | nullable: true 257 | type: string 258 | type: object 259 | status: 260 | properties: 261 | failureMessage: 262 | nullable: true 263 | type: string 264 | phase: 265 | nullable: true 266 | type: string 267 | type: object 268 | type: object 269 | served: true 270 | storage: true 271 | subresources: 272 | status: {} 273 | -------------------------------------------------------------------------------- /charts/gke-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rancher-gke-operator 3 | description: A Helm chart for provisioning GKE clusters 4 | home: https://github.com/rancher/gke-operator 5 | sources: 6 | - "https://github.com/rancher/gke-operator" 7 | version: 999 8 | appVersion: 999 9 | annotations: 10 | catalog.cattle.io/auto-install: rancher-gke-operator-crd=match 11 | catalog.cattle.io/certified: rancher 12 | catalog.cattle.io/hidden: "true" 13 | catalog.cattle.io/kube-version: ">= 1.18.0-0" 14 | catalog.cattle.io/namespace: cattle-system 15 | 
catalog.cattle.io/os: linux 16 | catalog.cattle.io/permits-os: linux,windows 17 | catalog.cattle.io/provides-gvr: gkeclusterconfigs.gke.cattle.io/v1 18 | catalog.cattle.io/rancher-version: ">= 2.6.0-alpha" 19 | catalog.cattle.io/release-name: rancher-gke-operator 20 | catalog.cattle.io/scope: management 21 | -------------------------------------------------------------------------------- /charts/gke-operator/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | You have deployed the Rancher GKE operator 2 | Version: {{ .Chart.AppVersion }} 3 | Description: This operator provisions GKE clusters 4 | from GKEClusterConfig CRs. 5 | -------------------------------------------------------------------------------- /charts/gke-operator/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | 3 | {{- define "system_default_registry" -}} 4 | {{- if .Values.global.cattle.systemDefaultRegistry -}} 5 | {{- printf "%s/" .Values.global.cattle.systemDefaultRegistry -}} 6 | {{- else -}} 7 | {{- "" -}} 8 | {{- end -}} 9 | {{- end -}} 10 | 11 | {{/* 12 | Windows cluster will add default taint for linux nodes, 13 | add below linux tolerations to workloads could be scheduled to those linux nodes 14 | */}} 15 | {{- define "linux-node-tolerations" -}} 16 | - key: "cattle.io/os" 17 | value: "linux" 18 | effect: "NoSchedule" 19 | operator: "Equal" 20 | {{- end -}} 21 | 22 | {{- define "linux-node-selector" -}} 23 | kubernetes.io/os: linux 24 | {{- end -}} 25 | 26 | -------------------------------------------------------------------------------- /charts/gke-operator/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: gke-operator 5 | namespace: cattle-system 6 | rules: 7 | - apiGroups: [''] 8 | 
resources: ['secrets'] 9 | verbs: ['get', 'list', 'create', 'watch'] 10 | - apiGroups: ['gke.cattle.io'] 11 | resources: ['gkeclusterconfigs'] 12 | verbs: ['get', 'list', 'update', 'watch'] 13 | - apiGroups: ['gke.cattle.io'] 14 | resources: ['gkeclusterconfigs/status'] 15 | verbs: ['update'] 16 | -------------------------------------------------------------------------------- /charts/gke-operator/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: gke-operator 5 | namespace: cattle-system 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: gke-operator 10 | subjects: 11 | - kind: ServiceAccount 12 | name: gke-operator 13 | namespace: cattle-system 14 | -------------------------------------------------------------------------------- /charts/gke-operator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: gke-config-operator 5 | namespace: cattle-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | ke.cattle.io/operator: gke 11 | template: 12 | metadata: 13 | labels: 14 | ke.cattle.io/operator: gke 15 | spec: 16 | nodeSelector: {{ include "linux-node-selector" . | nindent 8 }} 17 | {{- if .Values.nodeSelector }} 18 | {{ toYaml .Values.nodeSelector | indent 8 }} 19 | {{- end }} 20 | tolerations: {{ include "linux-node-tolerations" . 
| nindent 8 }} 21 | {{- if .Values.tolerations }} 22 | {{ toYaml .Values.tolerations | indent 8 }} 23 | {{- end }} 24 | serviceAccountName: gke-operator 25 | {{- if .Values.priorityClassName }} 26 | priorityClassName: "{{.Values.priorityClassName}}" 27 | {{- end }} 28 | securityContext: 29 | fsGroup: 1007 30 | runAsUser: 1007 31 | containers: 32 | - name: rancher-gke-operator 33 | image: '{{ template "system_default_registry" $ }}{{ $.Values.gkeOperator.image.repository }}:{{ $.Values.gkeOperator.image.tag }}' 34 | imagePullPolicy: IfNotPresent 35 | args: ["-debug={{ .Values.gkeOperator.debug | default false }}"] 36 | env: 37 | - name: HTTP_PROXY 38 | value: {{ .Values.httpProxy }} 39 | - name: HTTPS_PROXY 40 | value: {{ .Values.httpsProxy }} 41 | - name: NO_PROXY 42 | value: {{ .Values.noProxy }} 43 | securityContext: 44 | allowPrivilegeEscalation: false 45 | readOnlyRootFilesystem: true 46 | privileged: false 47 | capabilities: 48 | drop: 49 | - ALL 50 | {{- if .Values.additionalTrustedCAs }} 51 | # gke-operator mounts the additional CAs in two places: 52 | volumeMounts: 53 | # This directory is owned by the gke-operator user so c_rehash works here. 54 | - mountPath: /etc/rancher/ssl/ca-additional.pem 55 | name: tls-ca-additional-volume 56 | subPath: ca-additional.pem 57 | readOnly: true 58 | # This directory is root-owned so c_rehash doesn't work here, 59 | # but the cert is here in case update-ca-certificates is called in the future or by the OS. 
60 | - mountPath: /etc/pki/trust/anchors/ca-additional.pem 61 | name: tls-ca-additional-volume 62 | subPath: ca-additional.pem 63 | readOnly: true 64 | volumes: 65 | - name: tls-ca-additional-volume 66 | secret: 67 | defaultMode: 0400 68 | secretName: tls-ca-additional 69 | {{- end }} 70 | -------------------------------------------------------------------------------- /charts/gke-operator/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | namespace: cattle-system 5 | name: gke-operator 6 | -------------------------------------------------------------------------------- /charts/gke-operator/values.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | cattle: 3 | systemDefaultRegistry: "" 4 | 5 | gkeOperator: 6 | image: 7 | repository: rancher/gke-operator 8 | tag: v0.0.0 9 | debug: false 10 | 11 | httpProxy: "" 12 | httpsProxy: "" 13 | noProxy: "" 14 | additionalTrustedCAs: false 15 | ## Node labels for pod assignment 16 | ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ 17 | ## 18 | nodeSelector: {} 19 | ## List of node taints to tolerate (requires Kubernetes >= 1.6) 20 | tolerations: [] 21 | 22 | ## PriorityClassName assigned to deployment. 
23 | priorityClassName: "" 24 | -------------------------------------------------------------------------------- /controller/external.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strings" 7 | 8 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 9 | "github.com/rancher/gke-operator/pkg/gke" 10 | wranglerv1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" 11 | "golang.org/x/oauth2" 12 | gkeapi "google.golang.org/api/container/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | ) 15 | 16 | func parseCredential(ref string) (namespace string, name string) { 17 | parts := strings.SplitN(ref, ":", 2) 18 | if len(parts) == 1 { 19 | return "", parts[0] 20 | } 21 | return parts[0], parts[1] 22 | } 23 | 24 | func GetSecret(_ context.Context, secretsClient wranglerv1.SecretClient, configSpec *gkev1.GKEClusterConfigSpec) (string, error) { 25 | ns, id := parseCredential(configSpec.GoogleCredentialSecret) 26 | secret, err := secretsClient.Get(ns, id, metav1.GetOptions{}) 27 | if err != nil { 28 | return "", err 29 | } 30 | dataBytes, ok := secret.Data["googlecredentialConfig-authEncodedJson"] 31 | if !ok { 32 | return "", fmt.Errorf("could not read malformed cloud credential secret %s from namespace %s", id, ns) 33 | } 34 | return string(dataBytes), nil 35 | } 36 | 37 | func GetCluster(ctx context.Context, secretsClient wranglerv1.SecretClient, configSpec *gkev1.GKEClusterConfigSpec) (*gkeapi.Cluster, error) { 38 | cred, err := GetSecret(ctx, secretsClient, configSpec) 39 | if err != nil { 40 | return nil, err 41 | } 42 | gkeClient, err := gke.GetGKEClusterClient(ctx, cred) 43 | if err != nil { 44 | return nil, err 45 | } 46 | return gke.GetCluster(ctx, gkeClient, configSpec) 47 | } 48 | 49 | func GetTokenSource(ctx context.Context, secretsClient wranglerv1.SecretClient, configSpec *gkev1.GKEClusterConfigSpec) (oauth2.TokenSource, 
error) { 50 | cred, err := GetSecret(ctx, secretsClient, configSpec) 51 | if err != nil { 52 | return nil, fmt.Errorf("error getting secret: %w", err) 53 | } 54 | ts, err := gke.GetTokenSource(ctx, cred) 55 | if err != nil { 56 | return nil, fmt.Errorf("error getting oauth2 token: %w", err) 57 | } 58 | return ts, nil 59 | } 60 | 61 | // BuildUpstreamClusterState creates an GKEClusterConfigSpec (spec for the GKE cluster state) from the existing 62 | // cluster configuration. 63 | func BuildUpstreamClusterState(ctx context.Context, secretsCache wranglerv1.SecretCache, secretClient wranglerv1.SecretClient, configSpec *gkev1.GKEClusterConfigSpec) (*gkev1.GKEClusterConfigSpec, error) { 64 | cred, err := GetSecret(ctx, secretClient, configSpec) 65 | if err != nil { 66 | return nil, err 67 | } 68 | gkeClient, err := gke.GetGKEClusterClient(ctx, cred) 69 | if err != nil { 70 | return nil, err 71 | } 72 | gkeCluster, err := gke.GetCluster(ctx, gkeClient, configSpec) 73 | if err != nil { 74 | return nil, err 75 | } 76 | 77 | h := Handler{ 78 | secretsCache: secretsCache, 79 | secrets: secretClient, 80 | } 81 | return h.buildUpstreamClusterState(gkeCluster) 82 | } 83 | -------------------------------------------------------------------------------- /controller/suite_test.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | 7 | . "github.com/onsi/ginkgo/v2" 8 | . 
"github.com/onsi/gomega" 9 | gkev1 "github.com/rancher/gke-operator/pkg/generated/controllers/gke.cattle.io" 10 | "github.com/rancher/gke-operator/pkg/test" 11 | "github.com/rancher/wrangler/v3/pkg/generated/controllers/core" 12 | "k8s.io/client-go/rest" 13 | "sigs.k8s.io/controller-runtime/pkg/client" 14 | "sigs.k8s.io/controller-runtime/pkg/envtest" 15 | logf "sigs.k8s.io/controller-runtime/pkg/log" 16 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 17 | ) 18 | 19 | var ( 20 | testEnv *envtest.Environment 21 | cfg *rest.Config 22 | cl client.Client 23 | coreFactory *core.Factory 24 | gkeFactory *gkev1.Factory 25 | 26 | ctx = context.Background() 27 | ) 28 | 29 | func TestAPIs(t *testing.T) { 30 | RegisterFailHandler(Fail) 31 | RunSpecs(t, "GKE Operator Suite") 32 | } 33 | 34 | var _ = BeforeSuite(func() { 35 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 36 | 37 | By("bootstrapping test environment") 38 | var err error 39 | testEnv = &envtest.Environment{} 40 | cfg, cl, err = test.StartEnvTest(testEnv) 41 | Expect(err).NotTo(HaveOccurred()) 42 | Expect(cfg).NotTo(BeNil()) 43 | Expect(cl).NotTo(BeNil()) 44 | 45 | coreFactory, err = core.NewFactoryFromConfig(cfg) 46 | Expect(err).NotTo(HaveOccurred()) 47 | Expect(coreFactory).NotTo(BeNil()) 48 | 49 | gkeFactory, err = gkev1.NewFactoryFromConfig(cfg) 50 | Expect(err).NotTo(HaveOccurred()) 51 | Expect(gkeFactory).NotTo(BeNil()) 52 | }) 53 | 54 | var _ = AfterSuite(func() { 55 | By("tearing down the test environment") 56 | Expect(test.StopEnvTest(testEnv)).To(Succeed()) 57 | }) 58 | -------------------------------------------------------------------------------- /examples/cluster-autopilot.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gke.cattle.io/v1 2 | kind: GKEClusterConfig 3 | metadata: 4 | name: example-autopilot-cluster 5 | spec: 6 | clusterName: "example-autopilot-cluster" 7 | description: "Example cluster with autopilot" 8 | 
labels: {} 9 | region: "us-east1" 10 | projectID: "example-project" 11 | kubernetesVersion: "1.27.3-gke.100" 12 | loggingService: "" 13 | monitoringService: "" 14 | enableKubernetesAlpha: false 15 | clusterIpv4Cidr: "10.43.0.0/16" 16 | ipAllocationPolicy: 17 | useIpAliases: true 18 | clusterAddons: 19 | httpLoadBalancing: true 20 | networkPolicyConfig: false 21 | horizontalPodAutoscaling: true 22 | networkPolicyEnabled: false 23 | network: default 24 | subnetwork: default 25 | privateClusterConfig: 26 | enablePrivateEndpoint: false 27 | enablePrivateNodes: false 28 | masterAuthorizedNetworks: 29 | enabled: false 30 | locations: [] 31 | maintenanceWindow: "" 32 | googleCredentialSecret: "cattle-global-data:cc-gqpl4" 33 | autopilotConfig: 34 | enabled: true 35 | -------------------------------------------------------------------------------- /examples/cluster-basic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gke.cattle.io/v1 2 | kind: GKEClusterConfig 3 | metadata: 4 | name: example-cluster 5 | spec: 6 | clusterName: "example-cluster" 7 | description: "example cluster" 8 | labels: {} 9 | region: "us-west1" 10 | projectID: "example-project" 11 | kubernetesVersion: "1.19.10-gke.1600" 12 | loggingService: "" 13 | monitoringService: "" 14 | enableKubernetesAlpha: false 15 | clusterIpv4Cidr: "10.42.0.0/16" 16 | ipAllocationPolicy: 17 | useIpAliases: true 18 | nodePools: 19 | - name: example-node-pool 20 | autoscaling: 21 | enabled: false 22 | config: {} 23 | labels: [] 24 | initialNodeCount: 1 25 | maxPodsConstraint: 110 26 | version: "1.19.10-gke.1600" 27 | management: 28 | autoRepair: true 29 | autoUpgrade: true 30 | clusterAddons: 31 | httpLoadBalancing: true 32 | networkPolicyConfig: false 33 | horizontalPodAutoscaling: true 34 | networkPolicyEnabled: false 35 | network: default 36 | subnetwork: default 37 | privateClusterConfig: 38 | enablePrivateEndpoint: false 39 | enablePrivateNodes: false 40 | 
masterAuthorizedNetworks: 41 | enabled: false 42 | locations: [] 43 | maintenanceWindow: "" 44 | googleCredentialSecret: "cattle-global-data:cc-abcde" 45 | -------------------------------------------------------------------------------- /examples/cluster-full.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gke.cattle.io/v1 2 | kind: GKEClusterConfig 3 | metadata: 4 | name: example-cluster-b 5 | spec: 6 | clusterName: "example-cluster-b" 7 | description: "complex example cluster" 8 | labels: 9 | foo: bar 10 | zone: "us-west1-a" 11 | projectID: "example-project" 12 | kubernetesVersion: "1.19.10-gke.1600" 13 | loggingService: "none" 14 | monitoringService: "none" 15 | enableKubernetesAlpha: true 16 | clusterIpv4Cidr: "" 17 | ipAllocationPolicy: 18 | createSubnetwork: true 19 | subnetworkName: "new-example-subnet-for-cluster-b" 20 | clusterIpv4CidrBlock: "10.100.0.0/16" 21 | nodeIpv4CidrBlock: "10.101.0.0/16" 22 | servicesIpv4CidrBlock: "10.102.0.0/16" 23 | useIpAliases: true 24 | nodePools: 25 | - name: example-node-pool-1 26 | autoscaling: 27 | enabled: true 28 | maxNodeCount: 3 29 | minNodeCount: 1 30 | config: 31 | diskSizeGb: 50 32 | diskType: "pd-standard" 33 | imageType: "cos_containerd" 34 | localSsdCount: 1 35 | labels: 36 | color: blue 37 | shape: square 38 | machineType: "n1-standard-1" 39 | preemptible: true 40 | oauthScopes: 41 | - "https://www.googleapis.com/auth/compute" 42 | - "https://www.googleapis.com/auth/devstorage.read_only" 43 | - "https://www.googleapis.com/auth/cloud-platform" 44 | tags: 45 | - "red" 46 | - "blue" 47 | taints: 48 | - effect: NO_SCHEDULE 49 | key: group 50 | value: examples 51 | labels: 52 | - "foo" 53 | - "bar" 54 | initialNodeCount: 3 55 | maxPodsConstraint: 55 56 | version: "1.19.10-gke.1600" 57 | management: 58 | autoRepair: false 59 | autoUpgrade: false 60 | - name: example-node-pool-2 61 | autoscaling: 62 | enabled: true 63 | maxNodeCount: 4 64 | minNodeCount: 2 65 | 
config: 66 | diskSizeGb: 100 67 | diskType: "pd-standard" 68 | imageType: "cos" 69 | localSsdCount: 0 70 | labels: {} 71 | machineType: "" 72 | preemptible: false 73 | oauthScopes: [] 74 | taints: [] 75 | tags: 76 | - "green" 77 | - "yellow" 78 | labels: 79 | - "one" 80 | - "two" 81 | initialNodeCount: 3 82 | maxPodsConstraint: 110 83 | version: "1.19.10-gke.1600" 84 | management: 85 | autoRepair: false 86 | autoUpgrade: false 87 | clusterAddons: 88 | httpLoadBalancing: false 89 | networkPolicyConfig: true 90 | horizontalPodAutoscaling: false 91 | networkPolicyEnabled: true 92 | network: example-network 93 | subnetwork: "" 94 | privateClusterConfig: 95 | enablePrivateEndpoint: false 96 | enablePrivateNodes: true 97 | masterIpv4CidrBlock: "10.77.27.0/28" 98 | masterAuthorizedNetworks: 99 | enabled: true 100 | cidrBlocks: 101 | - displayName: office-net 102 | cidrBlock: "10.42.42.0/24" 103 | locations: 104 | - us-west1-a 105 | - us-west1-b 106 | - us-west1-c 107 | maintenanceWindow: "00:00" 108 | googleCredentialSecret: "cattle-global-data:cc-abcde" 109 | -------------------------------------------------------------------------------- /examples/cluster-registered.json: -------------------------------------------------------------------------------- 1 | { 2 | "clusterName": "example-cluster", 3 | "googleCredentialSecret": "cattle-global-data:cc-abcde", 4 | "projectID": "example-project", 5 | "region": "us-west1", 6 | "zone": "", 7 | "imported": true 8 | } 9 | -------------------------------------------------------------------------------- /examples/cluster-registered.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gke.cattle.io/v1 2 | kind: GKEClusterConfig 3 | metadata: 4 | name: example-registered-cluster 5 | spec: 6 | clusterName: "example-registered-cluster" 7 | region: "us-west1" 8 | projectID: "example-project" 9 | imported: true 10 | googleCredentialSecret: "cattle-global-data:cc-abcde" 11 | 
-------------------------------------------------------------------------------- /examples/cluster.json: -------------------------------------------------------------------------------- 1 | { 2 | "clusterAddons": { 3 | "horizontalPodAutoscaling": true, 4 | "httpLoadBalancing": true, 5 | "networkPolicyConfig": false 6 | }, 7 | "clusterIpv4Cidr": "", 8 | "clusterName": "example-cluster", 9 | "googleCredentialSecret": "cattle-global-data:cc-abcde", 10 | "description": "GKEConfig JSON cluster", 11 | "enableKubernetesAlpha": false, 12 | "network": "default", 13 | "subnetwork": "default", 14 | "imported": false, 15 | "ipAllocationPolicy": { 16 | "useIpAliases": true 17 | }, 18 | "kubernetesVersion": "1.19.10-gke.1600", 19 | "loggingService": "", 20 | "masterAuthorizedNetworks": { 21 | "enabled": false 22 | }, 23 | "monitoringService": "", 24 | "networkPolicyEnabled": false, 25 | "nodePools": [ 26 | { 27 | "autoscaling": { 28 | "enabled": false 29 | }, 30 | "config": {}, 31 | "initialNodeCount": 1, 32 | "labels": [], 33 | "maxPodsConstraint": 110, 34 | "name": "example-node-pool", 35 | "version": "1.19.10-gke.1600", 36 | "management": {} 37 | } 38 | ], 39 | "privateClusterConfig": { 40 | "enablePrivateEndpoint": false, 41 | "enablePrivateNodes": false 42 | }, 43 | "projectID": "example-project", 44 | "region": "us-west1", 45 | "zone": "", 46 | "locations": [], 47 | "maintenanceWindow": "", 48 | "labels": {} 49 | } 50 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/rancher/gke-operator 2 | 3 | go 1.23.0 4 | 5 | toolchain go1.23.6 6 | 7 | replace ( 8 | k8s.io/client-go => k8s.io/client-go v0.32.1 9 | k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 10 | ) 11 | 12 | require ( 13 | github.com/Masterminds/semver/v3 v3.3.0 14 | github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 15 | 
github.com/golang/mock v1.6.0 16 | github.com/onsi/ginkgo/v2 v2.23.4 17 | github.com/onsi/gomega v1.37.0 18 | github.com/pkg/errors v0.9.1 19 | github.com/rancher-sandbox/ele-testhelpers v0.0.0-20231206161614-20a517410736 20 | github.com/rancher/lasso v0.2.2 21 | github.com/rancher/rancher/pkg/apis v0.0.0-20240821150307-952f563826f5 22 | github.com/rancher/wrangler-api v0.6.1-0.20200427172631-a7c2f09b783e 23 | github.com/rancher/wrangler/v3 v3.2.0-rc.3 24 | github.com/sirupsen/logrus v1.9.3 25 | golang.org/x/net v0.40.0 26 | golang.org/x/oauth2 v0.30.0 27 | google.golang.org/api v0.234.0 28 | k8s.io/api v0.32.1 29 | k8s.io/apiextensions-apiserver v0.32.1 30 | k8s.io/apimachinery v0.32.1 31 | k8s.io/apiserver v0.32.1 32 | k8s.io/client-go v12.0.0+incompatible 33 | sigs.k8s.io/controller-runtime v0.19.4 34 | sigs.k8s.io/yaml v1.4.0 35 | ) 36 | 37 | require ( 38 | cloud.google.com/go/auth v0.16.1 // indirect 39 | cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect 40 | cloud.google.com/go/compute/metadata v0.7.0 // indirect 41 | github.com/beorn7/perks v1.0.1 // indirect 42 | github.com/blang/semver/v4 v4.0.0 // indirect 43 | github.com/cespare/xxhash/v2 v2.3.0 // indirect 44 | github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect 45 | github.com/emicklei/go-restful/v3 v3.12.1 // indirect 46 | github.com/evanphx/json-patch v5.9.11+incompatible // indirect 47 | github.com/evanphx/json-patch/v5 v5.9.0 // indirect 48 | github.com/felixge/httpsnoop v1.0.4 // indirect 49 | github.com/fxamacker/cbor/v2 v2.7.0 // indirect 50 | github.com/ghodss/yaml v1.0.0 // indirect 51 | github.com/go-logr/logr v1.4.2 // indirect 52 | github.com/go-logr/stdr v1.2.2 // indirect 53 | github.com/go-logr/zapr v1.3.0 // indirect 54 | github.com/go-openapi/jsonpointer v0.21.0 // indirect 55 | github.com/go-openapi/jsonreference v0.21.0 // indirect 56 | github.com/go-openapi/swag v0.23.0 // indirect 57 | github.com/go-task/slim-sprig/v3 v3.0.0 // indirect 58 | 
github.com/gogo/protobuf v1.3.2 // indirect 59 | github.com/golang/protobuf v1.5.4 // indirect 60 | github.com/google/gnostic-models v0.6.9 // indirect 61 | github.com/google/go-cmp v0.7.0 // indirect 62 | github.com/google/gofuzz v1.2.0 // indirect 63 | github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect 64 | github.com/google/s2a-go v0.1.9 // indirect 65 | github.com/google/uuid v1.6.0 // indirect 66 | github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect 67 | github.com/googleapis/gax-go/v2 v2.14.2 // indirect 68 | github.com/inconshreveable/mousetrap v1.1.0 // indirect 69 | github.com/josharian/intern v1.0.0 // indirect 70 | github.com/json-iterator/go v1.1.12 // indirect 71 | github.com/klauspost/compress v1.17.9 // indirect 72 | github.com/mailru/easyjson v0.7.7 // indirect 73 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 74 | github.com/modern-go/reflect2 v1.0.2 // indirect 75 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 76 | github.com/prometheus/client_golang v1.20.5 // indirect 77 | github.com/prometheus/client_model v0.6.1 // indirect 78 | github.com/prometheus/common v0.55.0 // indirect 79 | github.com/prometheus/procfs v0.15.1 // indirect 80 | github.com/rancher/aks-operator v1.9.1 // indirect 81 | github.com/rancher/eks-operator v1.9.1 // indirect 82 | github.com/rancher/fleet/pkg/apis v0.10.0 // indirect 83 | github.com/rancher/norman v0.0.0-20240708202514-a0127673d1b9 // indirect 84 | github.com/rancher/rke v1.6.0 // indirect 85 | github.com/rancher/wrangler v1.1.1 // indirect 86 | github.com/spf13/cobra v1.8.1 // indirect 87 | github.com/spf13/pflag v1.0.5 // indirect 88 | github.com/x448/float16 v0.8.4 // indirect 89 | go.opentelemetry.io/auto/sdk v1.1.0 // indirect 90 | go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect 91 | go.opentelemetry.io/otel v1.35.0 // indirect 92 | go.opentelemetry.io/otel/metric v1.35.0 
// indirect 93 | go.opentelemetry.io/otel/trace v1.35.0 // indirect 94 | go.uber.org/automaxprocs v1.6.0 // indirect 95 | go.uber.org/multierr v1.11.0 // indirect 96 | go.uber.org/zap v1.27.0 // indirect 97 | golang.org/x/crypto v0.38.0 // indirect 98 | golang.org/x/mod v0.24.0 // indirect 99 | golang.org/x/sync v0.14.0 // indirect 100 | golang.org/x/sys v0.33.0 // indirect 101 | golang.org/x/term v0.32.0 // indirect 102 | golang.org/x/text v0.25.0 // indirect 103 | golang.org/x/time v0.11.0 // indirect 104 | golang.org/x/tools v0.31.0 // indirect 105 | google.golang.org/genproto/googleapis/rpc v0.0.0-20250512202823-5a2f75b736a9 // indirect 106 | google.golang.org/grpc v1.72.1 // indirect 107 | google.golang.org/protobuf v1.36.6 // indirect 108 | gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect 109 | gopkg.in/inf.v0 v0.9.1 // indirect 110 | gopkg.in/yaml.v2 v2.4.0 // indirect 111 | gopkg.in/yaml.v3 v3.0.1 // indirect 112 | k8s.io/code-generator v0.32.1 // indirect 113 | k8s.io/component-base v0.32.1 // indirect 114 | k8s.io/gengo v0.0.0-20250130153323-76c5745d3511 // indirect 115 | k8s.io/gengo/v2 v2.0.0-20240911193312-2b36238f13e9 // indirect 116 | k8s.io/klog/v2 v2.130.1 // indirect 117 | k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect 118 | k8s.io/kubernetes v1.30.10 // indirect 119 | k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect 120 | sigs.k8s.io/cli-utils v0.37.2 // indirect 121 | sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect 122 | sigs.k8s.io/structured-merge-diff/v4 v4.4.3 // indirect 123 | ) 124 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | //go:generate go run pkg/codegen/cleanup/main.go 2 | //go:generate go run pkg/codegen/main.go 3 | 4 | package main 5 | 6 | import ( 7 | "flag" 8 | 9 | "github.com/rancher/gke-operator/controller" 10 | gkev1 
"github.com/rancher/gke-operator/pkg/generated/controllers/gke.cattle.io" 11 | core3 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core" 12 | "github.com/rancher/wrangler/v3/pkg/kubeconfig" 13 | "github.com/rancher/wrangler/v3/pkg/signals" 14 | "github.com/rancher/wrangler/v3/pkg/start" 15 | "github.com/sirupsen/logrus" 16 | ) 17 | 18 | var ( 19 | masterURL string 20 | kubeconfigFile string 21 | debug bool 22 | ) 23 | 24 | func init() { 25 | flag.StringVar(&kubeconfigFile, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") 26 | flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") 27 | flag.BoolVar(&debug, "debug", false, "Enable debug logs.") 28 | flag.Parse() 29 | } 30 | 31 | func main() { 32 | if debug { 33 | logrus.SetLevel(logrus.DebugLevel) 34 | logrus.Debugf("Loglevel set to [%v]", logrus.DebugLevel) 35 | } 36 | 37 | // set up signals so we handle the first shutdown signal gracefully 38 | ctx := signals.SetupSignalContext() 39 | 40 | // This will load the kubeconfig file in a style the same as kubectl 41 | cfg, err := kubeconfig.GetNonInteractiveClientConfig(kubeconfigFile).ClientConfig() 42 | if err != nil { 43 | logrus.Fatalf("Error building kubeconfig: %s", err.Error()) 44 | } 45 | 46 | // core 47 | core, err := core3.NewFactoryFromConfig(cfg) 48 | if err != nil { 49 | logrus.Fatalf("Error building core factory: %s", err.Error()) 50 | } 51 | 52 | // Generated sample controller 53 | gke, err := gkev1.NewFactoryFromConfig(cfg) 54 | if err != nil { 55 | logrus.Fatalf("Error building gke factory: %s", err.Error()) 56 | } 57 | 58 | // The typical pattern is to build all your controller/clients then just pass to each handler 59 | // the bare minimum of what they need. This will eventually help with writing tests. 
So 60 | // don't pass in something like kubeClient, apps, or sample 61 | controller.Register(ctx, 62 | core.Core().V1().Secret(), 63 | gke.Gke().V1().GKEClusterConfig()) 64 | 65 | // Start all the controllers 66 | if err := start.All(ctx, 3, gke, core); err != nil { 67 | logrus.Fatalf("Error starting: %s", err.Error()) 68 | } 69 | 70 | <-ctx.Done() 71 | } 72 | -------------------------------------------------------------------------------- /package/Dockerfile: -------------------------------------------------------------------------------- 1 | # Image that provides cross compilation tooling. 2 | FROM --platform=$BUILDPLATFORM rancher/mirrored-tonistiigi-xx:1.5.0 AS xx 3 | 4 | FROM registry.suse.com/bci/bci-base:15.6 AS base 5 | RUN sed -i 's/^CREATE_MAIL_SPOOL=yes/CREATE_MAIL_SPOOL=no/' /etc/default/useradd 6 | RUN useradd --uid 1007 gke-operator 7 | 8 | FROM --platform=$BUILDPLATFORM registry.suse.com/bci/golang:1.23 AS builder 9 | 10 | WORKDIR /app 11 | COPY go.mod go.sum ./ 12 | RUN go mod download && go mod verify 13 | 14 | COPY ./controller ./controller 15 | COPY ./pkg ./pkg 16 | COPY ./main.go ./main.go 17 | 18 | # Copy xx scripts to your build stage 19 | COPY --from=xx / / 20 | 21 | ARG TARGETPLATFORM 22 | ARG COMMIT 23 | ARG VERSION 24 | ENV CGO_ENABLED=0 25 | RUN xx-go build -ldflags \ 26 | "-X github.com/rancher/gke-operator/pkg/version.GitCommit=${COMMIT} \ 27 | -X github.com/rancher/gke-operator/pkg/version.Version=${VERSION}" \ 28 | -o /gke-operator && \ 29 | xx-verify /gke-operator 30 | 31 | FROM registry.suse.com/bci/bci-micro:15.6 32 | COPY --from=base /etc/passwd /etc/passwd 33 | COPY --from=base /etc/shadow /etc/shadow 34 | COPY --from=builder /gke-operator /usr/bin/gke-operator 35 | 36 | RUN rm -rf /tmp/* /var/tmp/* /usr/share/doc/packages/* 37 | 38 | ENV KUBECONFIG="/home/gke-operator/.kube/config" 39 | ENV SSL_CERT_DIR="/etc/rancher/ssl" 40 | 41 | COPY package/entrypoint.sh /usr/bin 42 | RUN chmod +x /usr/bin/entrypoint.sh 43 | 44 | RUN mkdir 
-p /etc/rancher/ssl && \ 45 | chown -R gke-operator /etc/rancher/ssl 46 | 47 | USER 1007 48 | ENTRYPOINT ["entrypoint.sh"] 49 | -------------------------------------------------------------------------------- /package/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ -x "$(command -v c_rehash)" ]; then 5 | # c_rehash is run here instead of update-ca-certificates because the latter requires root privileges 6 | # and the gke-operator container is run as non-root user. 7 | c_rehash 8 | fi 9 | gke-operator -------------------------------------------------------------------------------- /pkg/apis/gke.cattle.io/v1/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=gke.cattle.io 21 | package v1 22 | -------------------------------------------------------------------------------- /pkg/apis/gke.cattle.io/v1/zz_generated_list_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | // +k8s:deepcopy-gen=package 20 | // +groupName=gke.cattle.io 21 | package v1 22 | 23 | import ( 24 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | ) 26 | 27 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 28 | 29 | // GKEClusterConfigList is a list of GKEClusterConfig resources 30 | type GKEClusterConfigList struct { 31 | metav1.TypeMeta `json:",inline"` 32 | metav1.ListMeta `json:"metadata"` 33 | 34 | Items []GKEClusterConfig `json:"items"` 35 | } 36 | 37 | func NewGKEClusterConfig(namespace, name string, obj GKEClusterConfig) *GKEClusterConfig { 38 | obj.APIVersion, obj.Kind = SchemeGroupVersion.WithKind("GKEClusterConfig").ToAPIVersionAndKind() 39 | obj.Name = name 40 | obj.Namespace = namespace 41 | return &obj 42 | } 43 | -------------------------------------------------------------------------------- /pkg/apis/gke.cattle.io/v1/zz_generated_register.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

// +k8s:deepcopy-gen=package
// +groupName=gke.cattle.io
package v1

import (
	gke "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	// GKEClusterConfigResourceName is the plural, lowercase resource name
	// used in REST paths for GKEClusterConfig objects.
	GKEClusterConfigResourceName = "gkeclusterconfigs"
)

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: gke.GroupName, Version: "v1"}

// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
	return SchemeGroupVersion.WithKind(kind).GroupKind()
}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	// SchemeBuilder collects the functions that register this group's types
	// into a runtime.Scheme.
	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	// AddToScheme applies every collected registration function to a scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)

// Adds the list of known types to Scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&GKEClusterConfig{},
		&GKEClusterConfigList{},
	)
	// Also register the shared meta types (ListOptions, GetOptions, ...) for
	// this group version.
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
-------------------------------------------------------------------------------- /pkg/apis/gke.cattle.io/zz_generated_register.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package gke

const (
	// Package-wide consts from generator "zz_generated_register".
	GroupName = "gke.cattle.io"
)
-------------------------------------------------------------------------------- /pkg/codegen/boilerplate.go.txt: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | -------------------------------------------------------------------------------- /pkg/codegen/cleanup/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/rancher/wrangler/v3/pkg/cleanup" 7 | "github.com/sirupsen/logrus" 8 | ) 9 | 10 | func main() { 11 | if err := cleanup.Cleanup("./pkg/apis"); err != nil { 12 | logrus.Fatal(err) 13 | } 14 | if err := os.RemoveAll("./pkg/generated"); err != nil { 15 | logrus.Fatal(err) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /pkg/codegen/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 8 | _ "github.com/rancher/wrangler-api/pkg/generated/controllers/apiextensions.k8s.io" 9 | controllergen "github.com/rancher/wrangler/v3/pkg/controller-gen" 10 | "github.com/rancher/wrangler/v3/pkg/controller-gen/args" 11 | "github.com/rancher/wrangler/v3/pkg/crd" 12 | "github.com/rancher/wrangler/v3/pkg/yaml" 13 | corev1 "k8s.io/api/core/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/runtime/schema" 16 | ) 17 | 18 | func main() { 19 | os.Unsetenv("GOPATH") 20 | 21 | controllergen.Run(args.Options{ 22 | OutputPackage: "github.com/rancher/gke-operator/pkg/generated", 23 | Boilerplate: "pkg/codegen/boilerplate.go.txt", 24 | Groups: 
map[string]args.Group{ 25 | "gke.cattle.io": { 26 | Types: []interface{}{ 27 | "./pkg/apis/gke.cattle.io/v1", 28 | }, 29 | GenerateTypes: true, 30 | }, 31 | corev1.GroupName: { 32 | Types: []interface{}{ 33 | corev1.Pod{}, 34 | corev1.Node{}, 35 | corev1.Secret{}, 36 | }, 37 | }, 38 | }, 39 | }) 40 | 41 | gkeClusterConfig := newCRD(&gkev1.GKEClusterConfig{}, func(c crd.CRD) crd.CRD { 42 | c.ShortNames = []string{"gkecc"} 43 | return c 44 | }) 45 | 46 | obj, err := gkeClusterConfig.ToCustomResourceDefinition() 47 | if err != nil { 48 | panic(err) 49 | } 50 | 51 | obj.(*unstructured.Unstructured).SetAnnotations(map[string]string{ 52 | "helm.sh/resource-policy": "keep", 53 | }) 54 | 55 | gkeCCYaml, err := yaml.Export(obj) 56 | if err != nil { 57 | panic(err) 58 | } 59 | 60 | if err := saveCRDYaml("gke-operator-crd", string(gkeCCYaml)); err != nil { 61 | panic(err) 62 | } 63 | 64 | fmt.Printf("obj yaml: %s", gkeCCYaml) 65 | } 66 | 67 | func newCRD(obj interface{}, customize func(crd.CRD) crd.CRD) crd.CRD { 68 | crd := crd.CRD{ 69 | GVK: schema.GroupVersionKind{ 70 | Group: "gke.cattle.io", 71 | Version: "v1", 72 | }, 73 | Status: true, 74 | SchemaObject: obj, 75 | } 76 | if customize != nil { 77 | crd = customize(crd) 78 | } 79 | return crd 80 | } 81 | 82 | func saveCRDYaml(name, yaml string) error { 83 | filename := fmt.Sprintf("./charts/%s/templates/crds.yaml", name) 84 | save, err := os.Create(filename) 85 | if err != nil { 86 | return err 87 | } 88 | 89 | defer save.Close() 90 | if err := save.Chmod(0755); err != nil { 91 | return err 92 | } 93 | 94 | if _, err := fmt.Fprint(save, yaml); err != nil { 95 | return err 96 | } 97 | 98 | return nil 99 | } 100 | -------------------------------------------------------------------------------- /pkg/generated/controllers/core/factory.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, 
Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package core

import (
	"github.com/rancher/lasso/pkg/controller"
	"github.com/rancher/wrangler/v3/pkg/generic"
	"k8s.io/client-go/rest"
)

// Factory provides access to the controllers for the core API group by
// embedding the generic wrangler factory.
type Factory struct {
	*generic.Factory
}

// NewFactoryFromConfigOrDie is like NewFactoryFromConfig but panics on error.
func NewFactoryFromConfigOrDie(config *rest.Config) *Factory {
	f, err := NewFactoryFromConfig(config)
	if err != nil {
		panic(err)
	}
	return f
}

// NewFactoryFromConfig builds a Factory from a REST config with default options.
func NewFactoryFromConfig(config *rest.Config) (*Factory, error) {
	return NewFactoryFromConfigWithOptions(config, nil)
}

// NewFactoryFromConfigWithNamespace builds a Factory whose informers are
// restricted to the given namespace.
func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) {
	return NewFactoryFromConfigWithOptions(config, &FactoryOptions{
		Namespace: namespace,
	})
}

// FactoryOptions aliases the generic wrangler factory options.
type FactoryOptions = generic.FactoryOptions

// NewFactoryFromConfigWithOptions builds a Factory from a REST config and
// explicit options. Note: it can return a non-nil Factory together with an error.
func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) {
	f, err := generic.NewFactoryFromConfigWithOptions(config, opts)
	return &Factory{
		Factory: f,
	}, err
}

// NewFactoryFromConfigWithOptionsOrDie is like NewFactoryFromConfigWithOptions
// but panics on error.
func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory {
	f, err := NewFactoryFromConfigWithOptions(config, opts)
	if err != nil {
		panic(err)
	}
	return f
}

// Core returns the controller interface for the core group.
func (c *Factory) Core() Interface {
	return New(c.ControllerFactory())
}

// WithAgent returns the controller interface with the given user agent
// attached to every client request.
func (c *Factory) WithAgent(userAgent string) Interface {
	return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory()))
}
-------------------------------------------------------------------------------- /pkg/generated/controllers/core/interface.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package core

import (
	v1 "github.com/rancher/gke-operator/pkg/generated/controllers/core/v1"
	"github.com/rancher/lasso/pkg/controller"
)

// Interface exposes the versioned controller interfaces for this group.
type Interface interface {
	V1() v1.Interface
}

type group struct {
	controllerFactory controller.SharedControllerFactory
}

// New returns a new Interface.
func New(controllerFactory controller.SharedControllerFactory) Interface {
	return &group{
		controllerFactory: controllerFactory,
	}
}

// V1 returns the v1 controller interface backed by the shared factory.
func (g *group) V1() v1.Interface {
	return v1.New(g.controllerFactory)
}
-------------------------------------------------------------------------------- /pkg/generated/controllers/core/v1/interface.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package v1

import (
	"github.com/rancher/lasso/pkg/controller"
	"github.com/rancher/wrangler/v3/pkg/generic"
	"github.com/rancher/wrangler/v3/pkg/schemes"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func init() {
	// Make the core/v1 types known to the shared wrangler scheme used by
	// the controllers in this package.
	schemes.Register(v1.AddToScheme)
}

// Interface gives access to the controllers for the core/v1 types the
// operator consumes.
type Interface interface {
	Node() NodeController
	Pod() PodController
	Secret() SecretController
}

// New returns an Interface backed by the given shared controller factory.
func New(controllerFactory controller.SharedControllerFactory) Interface {
	return &version{
		controllerFactory: controllerFactory,
	}
}

type version struct {
	controllerFactory controller.SharedControllerFactory
}

// Node returns a cluster-scoped (non-namespaced) controller for v1 Nodes.
func (v *version) Node() NodeController {
	return generic.NewNonNamespacedController[*v1.Node, *v1.NodeList](schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Node"}, "nodes", v.controllerFactory)
}

// Pod returns a namespaced controller for v1 Pods.
func (v *version) Pod() PodController {
	return generic.NewController[*v1.Pod, *v1.PodList](schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"}, "pods", true, v.controllerFactory)
}

// Secret returns a namespaced controller for v1 Secrets.
func (v *version) Secret() SecretController {
	return generic.NewController[*v1.Secret, *v1.SecretList](schema.GroupVersionKind{Group: "", Version: "v1", Kind: "Secret"}, "secrets", true, v.controllerFactory)
}
-------------------------------------------------------------------------------- /pkg/generated/controllers/core/v1/node.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package v1

import (
	"context"
	"sync"
	"time"

	"github.com/rancher/wrangler/v3/pkg/apply"
	"github.com/rancher/wrangler/v3/pkg/condition"
	"github.com/rancher/wrangler/v3/pkg/generic"
	"github.com/rancher/wrangler/v3/pkg/kv"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// NodeController interface for managing Node resources.
type NodeController interface {
	generic.NonNamespacedControllerInterface[*v1.Node, *v1.NodeList]
}

// NodeClient interface for managing Node resources in Kubernetes.
type NodeClient interface {
	generic.NonNamespacedClientInterface[*v1.Node, *v1.NodeList]
}

// NodeCache interface for retrieving Node resources in memory.
type NodeCache interface {
	generic.NonNamespacedCacheInterface[*v1.Node]
}

// NodeStatusHandler is executed for every added or modified Node. Should return the new status to be updated
type NodeStatusHandler func(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error)

// NodeGeneratingHandler is the top-level handler that is executed for every Node event. It extends NodeStatusHandler by a returning a slice of child objects to be passed to apply.Apply
type NodeGeneratingHandler func(obj *v1.Node, status v1.NodeStatus) ([]runtime.Object, v1.NodeStatus, error)

// RegisterNodeStatusHandler configures a NodeController to execute a NodeStatusHandler for every events observed.
// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution
func RegisterNodeStatusHandler(ctx context.Context, controller NodeController, condition condition.Cond, name string, handler NodeStatusHandler) {
	statusHandler := &nodeStatusHandler{
		client:    controller,
		condition: condition,
		handler:   handler,
	}
	controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync))
}

// RegisterNodeGeneratingHandler configures a NodeController to execute a NodeGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply.
// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution
func RegisterNodeGeneratingHandler(ctx context.Context, controller NodeController, apply apply.Apply,
	condition condition.Cond, name string, handler NodeGeneratingHandler, opts *generic.GeneratingHandlerOptions) {
	statusHandler := &nodeGeneratingHandler{
		NodeGeneratingHandler: handler,
		apply:                 apply,
		name:                  name,
		gvk:                   controller.GroupVersionKind(),
	}
	if opts != nil {
		statusHandler.opts = *opts
	}
	// Remove cleans up previously applied child objects once the Node is gone.
	controller.OnChange(ctx, name, statusHandler.Remove)
	RegisterNodeStatusHandler(ctx, controller, condition, name, statusHandler.Handle)
}

type nodeStatusHandler struct {
	client    NodeClient
	condition condition.Cond
	handler   NodeStatusHandler
}

// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API
func (a *nodeStatusHandler) sync(key string, obj *v1.Node) (*v1.Node, error) {
	if obj == nil {
		return obj, nil
	}

	origStatus := obj.Status.DeepCopy()
	obj = obj.DeepCopy()
	newStatus, err := a.handler(obj, obj.Status)
	if err != nil {
		// Revert to old status on error
		newStatus = *origStatus.DeepCopy()
	}

	if a.condition != "" {
		if errors.IsConflict(err) {
			// Conflicts are transient; do not surface them into the condition.
			a.condition.SetError(&newStatus, "", nil)
		} else {
			a.condition.SetError(&newStatus, "", err)
		}
	}
	if !equality.Semantic.DeepEqual(origStatus, &newStatus) {
		if a.condition != "" {
			// Since status has changed, update the lastUpdatedTime
			a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339))
		}

		var newErr error
		obj.Status = newStatus
		newObj, newErr := a.client.UpdateStatus(obj)
		// Keep the handler error if there was one; otherwise report the
		// UpdateStatus error.
		if err == nil {
			err = newErr
		}
		if newErr == nil {
			obj = newObj
		}
	}
	return obj, err
}

type nodeGeneratingHandler struct {
	NodeGeneratingHandler
	apply apply.Apply
	opts  generic.GeneratingHandlerOptions
	gvk   schema.GroupVersionKind
	name  string
	seen  sync.Map
}

// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied
func (a *nodeGeneratingHandler) Remove(key string, obj *v1.Node) (*v1.Node, error) {
	if obj != nil {
		return obj, nil
	}

	// Rebuild a stub object from the cache key so apply can resolve ownership.
	obj = &v1.Node{}
	obj.Namespace, obj.Name = kv.RSplit(key, "/")
	obj.SetGroupVersionKind(a.gvk)

	if a.opts.UniqueApplyForResourceVersion {
		a.seen.Delete(key)
	}

	// Applying an empty object set deletes everything previously applied
	// under this owner / set ID.
	return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
		WithOwner(obj).
		WithSetID(a.name).
		ApplyObjects()
}

// Handle executes the configured NodeGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource
func (a *nodeGeneratingHandler) Handle(obj *v1.Node, status v1.NodeStatus) (v1.NodeStatus, error) {
	if !obj.DeletionTimestamp.IsZero() {
		return status, nil
	}

	objs, newStatus, err := a.NodeGeneratingHandler(obj, status)
	if err != nil {
		return newStatus, err
	}
	if !a.isNewResourceVersion(obj) {
		return newStatus, nil
	}

	err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
		WithOwner(obj).
		WithSetID(a.name).
		ApplyObjects(objs...)
	if err != nil {
		return newStatus, err
	}
	a.storeResourceVersion(obj)
	return newStatus, nil
}

// isNewResourceVersion detects if a specific resource version was already successfully processed.
// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions
func (a *nodeGeneratingHandler) isNewResourceVersion(obj *v1.Node) bool {
	if !a.opts.UniqueApplyForResourceVersion {
		return true
	}

	// Apply once per resource version
	key := obj.Namespace + "/" + obj.Name
	previous, ok := a.seen.Load(key)
	return !ok || previous != obj.ResourceVersion
}

// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed
// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions
func (a *nodeGeneratingHandler) storeResourceVersion(obj *v1.Node) {
	if !a.opts.UniqueApplyForResourceVersion {
		return
	}

	key := obj.Namespace + "/" + obj.Name
	a.seen.Store(key, obj.ResourceVersion)
}
--------------------------------------------------------------------------------
/pkg/generated/controllers/core/v1/pod.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package v1

import (
	"context"
	"sync"
	"time"

	"github.com/rancher/wrangler/v3/pkg/apply"
	"github.com/rancher/wrangler/v3/pkg/condition"
	"github.com/rancher/wrangler/v3/pkg/generic"
	"github.com/rancher/wrangler/v3/pkg/kv"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// PodController interface for managing Pod resources.
type PodController interface {
	generic.ControllerInterface[*v1.Pod, *v1.PodList]
}

// PodClient interface for managing Pod resources in Kubernetes.
type PodClient interface {
	generic.ClientInterface[*v1.Pod, *v1.PodList]
}

// PodCache interface for retrieving Pod resources in memory.
type PodCache interface {
	generic.CacheInterface[*v1.Pod]
}

// PodStatusHandler is executed for every added or modified Pod. Should return the new status to be updated
type PodStatusHandler func(obj *v1.Pod, status v1.PodStatus) (v1.PodStatus, error)

// PodGeneratingHandler is the top-level handler that is executed for every Pod event. It extends PodStatusHandler by a returning a slice of child objects to be passed to apply.Apply
type PodGeneratingHandler func(obj *v1.Pod, status v1.PodStatus) ([]runtime.Object, v1.PodStatus, error)

// RegisterPodStatusHandler configures a PodController to execute a PodStatusHandler for every events observed.
// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution
func RegisterPodStatusHandler(ctx context.Context, controller PodController, condition condition.Cond, name string, handler PodStatusHandler) {
	statusHandler := &podStatusHandler{
		client:    controller,
		condition: condition,
		handler:   handler,
	}
	controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync))
}

// RegisterPodGeneratingHandler configures a PodController to execute a PodGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply.
// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution
func RegisterPodGeneratingHandler(ctx context.Context, controller PodController, apply apply.Apply,
	condition condition.Cond, name string, handler PodGeneratingHandler, opts *generic.GeneratingHandlerOptions) {
	statusHandler := &podGeneratingHandler{
		PodGeneratingHandler: handler,
		apply:                apply,
		name:                 name,
		gvk:                  controller.GroupVersionKind(),
	}
	if opts != nil {
		statusHandler.opts = *opts
	}
	// Remove cleans up previously applied child objects once the Pod is gone.
	controller.OnChange(ctx, name, statusHandler.Remove)
	RegisterPodStatusHandler(ctx, controller, condition, name, statusHandler.Handle)
}

type podStatusHandler struct {
	client    PodClient
	condition condition.Cond
	handler   PodStatusHandler
}

// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API
func (a *podStatusHandler) sync(key string, obj *v1.Pod) (*v1.Pod, error) {
	if obj == nil {
		return obj, nil
	}

	origStatus := obj.Status.DeepCopy()
	obj = obj.DeepCopy()
	newStatus, err := a.handler(obj, obj.Status)
	if err != nil {
		// Revert to old status on error
		newStatus = *origStatus.DeepCopy()
	}

	if a.condition != "" {
		if errors.IsConflict(err) {
			// Conflicts are transient; do not surface them into the condition.
			a.condition.SetError(&newStatus, "", nil)
		} else {
			a.condition.SetError(&newStatus, "", err)
		}
	}
	if !equality.Semantic.DeepEqual(origStatus, &newStatus) {
		if a.condition != "" {
			// Since status has changed, update the lastUpdatedTime
			a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339))
		}

		var newErr error
		obj.Status = newStatus
		newObj, newErr := a.client.UpdateStatus(obj)
		// Keep the handler error if there was one; otherwise report the
		// UpdateStatus error.
		if err == nil {
			err = newErr
		}
		if newErr == nil {
			obj = newObj
		}
	}
	return obj, err
}

type podGeneratingHandler struct {
	PodGeneratingHandler
	apply apply.Apply
	opts  generic.GeneratingHandlerOptions
	gvk   schema.GroupVersionKind
	name  string
	seen  sync.Map
}

// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied
func (a *podGeneratingHandler) Remove(key string, obj *v1.Pod) (*v1.Pod, error) {
	if obj != nil {
		return obj, nil
	}

	// Rebuild a stub object from the cache key so apply can resolve ownership.
	obj = &v1.Pod{}
	obj.Namespace, obj.Name = kv.RSplit(key, "/")
	obj.SetGroupVersionKind(a.gvk)

	if a.opts.UniqueApplyForResourceVersion {
		a.seen.Delete(key)
	}

	// Applying an empty object set deletes everything previously applied
	// under this owner / set ID.
	return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
		WithOwner(obj).
		WithSetID(a.name).
		ApplyObjects()
}

// Handle executes the configured PodGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource
func (a *podGeneratingHandler) Handle(obj *v1.Pod, status v1.PodStatus) (v1.PodStatus, error) {
	if !obj.DeletionTimestamp.IsZero() {
		return status, nil
	}

	objs, newStatus, err := a.PodGeneratingHandler(obj, status)
	if err != nil {
		return newStatus, err
	}
	if !a.isNewResourceVersion(obj) {
		return newStatus, nil
	}

	err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts).
		WithOwner(obj).
		WithSetID(a.name).
		ApplyObjects(objs...)
	if err != nil {
		return newStatus, err
	}
	a.storeResourceVersion(obj)
	return newStatus, nil
}

// isNewResourceVersion detects if a specific resource version was already successfully processed.
// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions
func (a *podGeneratingHandler) isNewResourceVersion(obj *v1.Pod) bool {
	if !a.opts.UniqueApplyForResourceVersion {
		return true
	}

	// Apply once per resource version
	key := obj.Namespace + "/" + obj.Name
	previous, ok := a.seen.Load(key)
	return !ok || previous != obj.ResourceVersion
}

// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed
// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions
func (a *podGeneratingHandler) storeResourceVersion(obj *v1.Pod) {
	if !a.opts.UniqueApplyForResourceVersion {
		return
	}

	key := obj.Namespace + "/" + obj.Name
	a.seen.Store(key, obj.ResourceVersion)
}
-------------------------------------------------------------------------------- /pkg/generated/controllers/core/v1/secret.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package v1

import (
	"github.com/rancher/wrangler/v3/pkg/generic"
	v1 "k8s.io/api/core/v1"
)

// SecretController interface for managing Secret resources.
type SecretController interface {
	generic.ControllerInterface[*v1.Secret, *v1.SecretList]
}

// SecretClient interface for managing Secret resources in Kubernetes.
type SecretClient interface {
	generic.ClientInterface[*v1.Secret, *v1.SecretList]
}

// SecretCache interface for retrieving Secret resources in memory.
type SecretCache interface {
	generic.CacheInterface[*v1.Secret]
}
-------------------------------------------------------------------------------- /pkg/generated/controllers/gke.cattle.io/factory.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

package gke

import (
	"github.com/rancher/lasso/pkg/controller"
	"github.com/rancher/wrangler/v3/pkg/generic"
	"k8s.io/client-go/rest"
)

// Factory provides access to the controllers for the gke.cattle.io API group
// by embedding the generic wrangler factory.
type Factory struct {
	*generic.Factory
}

// NewFactoryFromConfigOrDie is like NewFactoryFromConfig but panics on error.
func NewFactoryFromConfigOrDie(config *rest.Config) *Factory {
	f, err := NewFactoryFromConfig(config)
	if err != nil {
		panic(err)
	}
	return f
}

// NewFactoryFromConfig builds a Factory from a REST config with default options.
func NewFactoryFromConfig(config *rest.Config) (*Factory, error) {
	return NewFactoryFromConfigWithOptions(config, nil)
}

// NewFactoryFromConfigWithNamespace builds a Factory whose informers are
// restricted to the given namespace.
func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) {
	return NewFactoryFromConfigWithOptions(config, &FactoryOptions{
		Namespace: namespace,
	})
}

// FactoryOptions aliases the generic wrangler factory options.
type FactoryOptions = generic.FactoryOptions

// NewFactoryFromConfigWithOptions builds a Factory from a REST config and
// explicit options. Note: it can return a non-nil Factory together with an error.
func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) {
	f, err := generic.NewFactoryFromConfigWithOptions(config, opts)
	return &Factory{
		Factory: f,
	}, err
}

// NewFactoryFromConfigWithOptionsOrDie is like NewFactoryFromConfigWithOptions
// but panics on error.
func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory {
	f, err := NewFactoryFromConfigWithOptions(config, opts)
	if err != nil {
		panic(err)
	}
	return f
}

// Gke returns the controller interface for the gke.cattle.io group.
func (c *Factory) Gke() Interface {
	return New(c.ControllerFactory())
}

// WithAgent returns the controller interface with the given user agent
// attached to every client request.
func (c *Factory) WithAgent(userAgent string) Interface {
	return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory()))
}
-------------------------------------------------------------------------------- /pkg/generated/controllers/gke.cattle.io/interface.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by main. DO NOT EDIT.

package gke

import (
	v1 "github.com/rancher/gke-operator/pkg/generated/controllers/gke.cattle.io/v1"
	"github.com/rancher/lasso/pkg/controller"
)

// Interface exposes the versioned controller interfaces for this group.
type Interface interface {
	V1() v1.Interface
}

type group struct {
	controllerFactory controller.SharedControllerFactory
}

// New returns a new Interface.
func New(controllerFactory controller.SharedControllerFactory) Interface {
	return &group{
		controllerFactory: controllerFactory,
	}
}

// V1 returns the v1 controller interface backed by the shared factory.
func (g *group) V1() v1.Interface {
	return v1.New(g.controllerFactory)
}
-------------------------------------------------------------------------------- /pkg/generated/controllers/gke.cattle.io/v1/gkeclusterconfig.go: --------------------------------------------------------------------------------
/*
Copyright 2019 Wrangler Sample Controller Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 18 | 19 | package v1 20 | 21 | import ( 22 | "context" 23 | "sync" 24 | "time" 25 | 26 | v1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 27 | "github.com/rancher/wrangler/v3/pkg/apply" 28 | "github.com/rancher/wrangler/v3/pkg/condition" 29 | "github.com/rancher/wrangler/v3/pkg/generic" 30 | "github.com/rancher/wrangler/v3/pkg/kv" 31 | "k8s.io/apimachinery/pkg/api/equality" 32 | "k8s.io/apimachinery/pkg/api/errors" 33 | "k8s.io/apimachinery/pkg/runtime" 34 | "k8s.io/apimachinery/pkg/runtime/schema" 35 | ) 36 | 37 | // GKEClusterConfigController interface for managing GKEClusterConfig resources. 38 | type GKEClusterConfigController interface { 39 | generic.ControllerInterface[*v1.GKEClusterConfig, *v1.GKEClusterConfigList] 40 | } 41 | 42 | // GKEClusterConfigClient interface for managing GKEClusterConfig resources in Kubernetes. 43 | type GKEClusterConfigClient interface { 44 | generic.ClientInterface[*v1.GKEClusterConfig, *v1.GKEClusterConfigList] 45 | } 46 | 47 | // GKEClusterConfigCache interface for retrieving GKEClusterConfig resources in memory. 48 | type GKEClusterConfigCache interface { 49 | generic.CacheInterface[*v1.GKEClusterConfig] 50 | } 51 | 52 | // GKEClusterConfigStatusHandler is executed for every added or modified GKEClusterConfig. Should return the new status to be updated 53 | type GKEClusterConfigStatusHandler func(obj *v1.GKEClusterConfig, status v1.GKEClusterConfigStatus) (v1.GKEClusterConfigStatus, error) 54 | 55 | // GKEClusterConfigGeneratingHandler is the top-level handler that is executed for every GKEClusterConfig event. 
It extends GKEClusterConfigStatusHandler by a returning a slice of child objects to be passed to apply.Apply 56 | type GKEClusterConfigGeneratingHandler func(obj *v1.GKEClusterConfig, status v1.GKEClusterConfigStatus) ([]runtime.Object, v1.GKEClusterConfigStatus, error) 57 | 58 | // RegisterGKEClusterConfigStatusHandler configures a GKEClusterConfigController to execute a GKEClusterConfigStatusHandler for every events observed. 59 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 60 | func RegisterGKEClusterConfigStatusHandler(ctx context.Context, controller GKEClusterConfigController, condition condition.Cond, name string, handler GKEClusterConfigStatusHandler) { 61 | statusHandler := &gKEClusterConfigStatusHandler{ 62 | client: controller, 63 | condition: condition, 64 | handler: handler, 65 | } 66 | controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) 67 | } 68 | 69 | // RegisterGKEClusterConfigGeneratingHandler configures a GKEClusterConfigController to execute a GKEClusterConfigGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 
70 | // If a non-empty condition is provided, it will be updated in the status conditions for every handler execution 71 | func RegisterGKEClusterConfigGeneratingHandler(ctx context.Context, controller GKEClusterConfigController, apply apply.Apply, 72 | condition condition.Cond, name string, handler GKEClusterConfigGeneratingHandler, opts *generic.GeneratingHandlerOptions) { 73 | statusHandler := &gKEClusterConfigGeneratingHandler{ 74 | GKEClusterConfigGeneratingHandler: handler, 75 | apply: apply, 76 | name: name, 77 | gvk: controller.GroupVersionKind(), 78 | } 79 | if opts != nil { 80 | statusHandler.opts = *opts 81 | } 82 | controller.OnChange(ctx, name, statusHandler.Remove) 83 | RegisterGKEClusterConfigStatusHandler(ctx, controller, condition, name, statusHandler.Handle) 84 | } 85 | 86 | type gKEClusterConfigStatusHandler struct { 87 | client GKEClusterConfigClient 88 | condition condition.Cond 89 | handler GKEClusterConfigStatusHandler 90 | } 91 | 92 | // sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API 93 | func (a *gKEClusterConfigStatusHandler) sync(key string, obj *v1.GKEClusterConfig) (*v1.GKEClusterConfig, error) { 94 | if obj == nil { 95 | return obj, nil 96 | } 97 | 98 | origStatus := obj.Status.DeepCopy() 99 | obj = obj.DeepCopy() 100 | newStatus, err := a.handler(obj, obj.Status) 101 | if err != nil { 102 | // Revert to old status on error 103 | newStatus = *origStatus.DeepCopy() 104 | } 105 | 106 | if a.condition != "" { 107 | if errors.IsConflict(err) { 108 | a.condition.SetError(&newStatus, "", nil) 109 | } else { 110 | a.condition.SetError(&newStatus, "", err) 111 | } 112 | } 113 | if !equality.Semantic.DeepEqual(origStatus, &newStatus) { 114 | if a.condition != "" { 115 | // Since status has changed, update the lastUpdatedTime 116 | a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) 117 | } 118 | 119 | var newErr error 120 | obj.Status = newStatus 121 | newObj, newErr := a.client.UpdateStatus(obj) 122 | if err == nil { 123 | err = newErr 124 | } 125 | if newErr == nil { 126 | obj = newObj 127 | } 128 | } 129 | return obj, err 130 | } 131 | 132 | type gKEClusterConfigGeneratingHandler struct { 133 | GKEClusterConfigGeneratingHandler 134 | apply apply.Apply 135 | opts generic.GeneratingHandlerOptions 136 | gvk schema.GroupVersionKind 137 | name string 138 | seen sync.Map 139 | } 140 | 141 | // Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied 142 | func (a *gKEClusterConfigGeneratingHandler) Remove(key string, obj *v1.GKEClusterConfig) (*v1.GKEClusterConfig, error) { 143 | if obj != nil { 144 | return obj, nil 145 | } 146 | 147 | obj = &v1.GKEClusterConfig{} 148 | obj.Namespace, obj.Name = kv.RSplit(key, "/") 149 | obj.SetGroupVersionKind(a.gvk) 150 | 151 | if a.opts.UniqueApplyForResourceVersion { 152 | a.seen.Delete(key) 153 | } 154 | 155 | return nil, 
generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 156 | WithOwner(obj). 157 | WithSetID(a.name). 158 | ApplyObjects() 159 | } 160 | 161 | // Handle executes the configured GKEClusterConfigGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource 162 | func (a *gKEClusterConfigGeneratingHandler) Handle(obj *v1.GKEClusterConfig, status v1.GKEClusterConfigStatus) (v1.GKEClusterConfigStatus, error) { 163 | if !obj.DeletionTimestamp.IsZero() { 164 | return status, nil 165 | } 166 | 167 | objs, newStatus, err := a.GKEClusterConfigGeneratingHandler(obj, status) 168 | if err != nil { 169 | return newStatus, err 170 | } 171 | if !a.isNewResourceVersion(obj) { 172 | return newStatus, nil 173 | } 174 | 175 | err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). 176 | WithOwner(obj). 177 | WithSetID(a.name). 178 | ApplyObjects(objs...) 179 | if err != nil { 180 | return newStatus, err 181 | } 182 | a.storeResourceVersion(obj) 183 | return newStatus, nil 184 | } 185 | 186 | // isNewResourceVersion detects if a specific resource version was already successfully processed. 
187 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 188 | func (a *gKEClusterConfigGeneratingHandler) isNewResourceVersion(obj *v1.GKEClusterConfig) bool { 189 | if !a.opts.UniqueApplyForResourceVersion { 190 | return true 191 | } 192 | 193 | // Apply once per resource version 194 | key := obj.Namespace + "/" + obj.Name 195 | previous, ok := a.seen.Load(key) 196 | return !ok || previous != obj.ResourceVersion 197 | } 198 | 199 | // storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed 200 | // Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions 201 | func (a *gKEClusterConfigGeneratingHandler) storeResourceVersion(obj *v1.GKEClusterConfig) { 202 | if !a.opts.UniqueApplyForResourceVersion { 203 | return 204 | } 205 | 206 | key := obj.Namespace + "/" + obj.Name 207 | a.seen.Store(key, obj.ResourceVersion) 208 | } 209 | -------------------------------------------------------------------------------- /pkg/generated/controllers/gke.cattle.io/v1/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2019 Wrangler Sample Controller Authors 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Code generated by main. DO NOT EDIT. 
18 | 19 | package v1 20 | 21 | import ( 22 | v1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 23 | "github.com/rancher/lasso/pkg/controller" 24 | "github.com/rancher/wrangler/v3/pkg/generic" 25 | "github.com/rancher/wrangler/v3/pkg/schemes" 26 | "k8s.io/apimachinery/pkg/runtime/schema" 27 | ) 28 | 29 | func init() { 30 | schemes.Register(v1.AddToScheme) 31 | } 32 | 33 | type Interface interface { 34 | GKEClusterConfig() GKEClusterConfigController 35 | } 36 | 37 | func New(controllerFactory controller.SharedControllerFactory) Interface { 38 | return &version{ 39 | controllerFactory: controllerFactory, 40 | } 41 | } 42 | 43 | type version struct { 44 | controllerFactory controller.SharedControllerFactory 45 | } 46 | 47 | func (v *version) GKEClusterConfig() GKEClusterConfigController { 48 | return generic.NewController[*v1.GKEClusterConfig, *v1.GKEClusterConfigList](schema.GroupVersionKind{Group: "gke.cattle.io", Version: "v1", Kind: "GKEClusterConfig"}, "gkeclusterconfigs", true, v.controllerFactory) 49 | } 50 | -------------------------------------------------------------------------------- /pkg/gke/client.go: -------------------------------------------------------------------------------- 1 | package gke 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/rancher/gke-operator/pkg/gke/services" 7 | "golang.org/x/oauth2" 8 | "golang.org/x/oauth2/google" 9 | gkeapi "google.golang.org/api/container/v1" 10 | "google.golang.org/api/option" 11 | ) 12 | 13 | // GetGKEClient accepts a JSON credential string and returns a Service client. 
14 | func GetGKEClient(ctx context.Context, credential string) (*gkeapi.Service, error) { 15 | ts, err := GetTokenSource(ctx, credential) 16 | if err != nil { 17 | return nil, err 18 | } 19 | return getServiceClientWithTokenSource(ctx, ts) 20 | } 21 | 22 | // GetGKEClusterClient accepts a JSON credential string and returns a GKEClusterService wrapper around the Container API. func GetGKEClusterClient(ctx context.Context, credential string) (services.GKEClusterService, error) { 23 | ts, err := GetTokenSource(ctx, credential) 24 | if err != nil { 25 | return nil, err 26 | } 27 | return services.NewGKEClusterService(ctx, ts) 28 | } 29 | 30 | // getServiceClientWithTokenSource builds a raw Container API Service client whose HTTP client is authenticated by ts. func getServiceClientWithTokenSource(ctx context.Context, ts oauth2.TokenSource) (*gkeapi.Service, error) { 31 | return gkeapi.NewService(ctx, option.WithHTTPClient(oauth2.NewClient(ctx, ts))) 32 | } 33 | 34 | // GetTokenSource parses the JSON service-account credential and returns an OAuth2 token source scoped to Cloud Platform. func GetTokenSource(ctx context.Context, credential string) (oauth2.TokenSource, error) { 35 | ts, err := google.CredentialsFromJSON(ctx, []byte(credential), gkeapi.CloudPlatformScope) 36 | if err != nil { 37 | return nil, err 38 | } 39 | return ts.TokenSource, nil 40 | } 41 | -------------------------------------------------------------------------------- /pkg/gke/consts.go: -------------------------------------------------------------------------------- 1 | package gke 2 | 3 | // Status indicates how to handle the response from a request to update a resource 4 | type Status int 5 | 6 | // Status indicators 7 | const ( 8 | // Changed means the request to change resource was accepted and change is in progress 9 | Changed Status = iota 10 | // Retry means the request to change resource was rejected due to an expected error and should be retried later 11 | Retry 12 | // NotChanged means the resource was not changed, either due to error or because it was unnecessary 13 | NotChanged 14 | ) 15 | 16 | // Error strings from the provider 17 | // NOTE(review): these are matched by substring against provider error text (see delete.go); presumably stable GKE API wording — confirm against the Container API error responses. 18 | const ( 19 | errNotFound = "notFound" 20 | errWait = "Please wait and try again once it is done" 21 | ) 22 | --------------------------------------------------------------------------------
/pkg/gke/create_test.go: -------------------------------------------------------------------------------- 1 | package gke 2 | 3 | import ( 4 | "github.com/golang/mock/gomock" 5 | . "github.com/onsi/ginkgo/v2" 6 | . "github.com/onsi/gomega" 7 | 8 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 9 | "github.com/rancher/gke-operator/pkg/gke/services/mock_services" 10 | gkeapi "google.golang.org/api/container/v1" 11 | ) 12 | 13 | var _ = Describe("CreateCluster", func() { 14 | var ( 15 | mockController *gomock.Controller 16 | clusterServiceMock *mock_services.MockGKEClusterService 17 | k8sVersion = "1.25.12-gke.200" 18 | clusterIpv4Cidr = "10.42.0.0/16" 19 | networkName = "test-network" 20 | subnetworkName = "test-subnetwork" 21 | emptyString = "" 22 | boolTrue = true 23 | nodePoolName = "test-node-pool" 24 | initialNodeCount = int64(3) 25 | maxPodsConstraint = int64(110) 26 | config = &gkev1.GKEClusterConfig{ 27 | Spec: gkev1.GKEClusterConfigSpec{ 28 | Region: "test-region", 29 | ProjectID: "test-project", 30 | ClusterName: "test-cluster", 31 | Locations: []string{""}, 32 | Labels: map[string]string{"test": "test"}, 33 | ClusterIpv4CidrBlock: &clusterIpv4Cidr, 34 | KubernetesVersion: &k8sVersion, 35 | LoggingService: &emptyString, 36 | MonitoringService: &emptyString, 37 | EnableKubernetesAlpha: &boolTrue, 38 | Network: &networkName, 39 | Subnetwork: &subnetworkName, 40 | NetworkPolicyEnabled: &boolTrue, 41 | MaintenanceWindow: &emptyString, 42 | IPAllocationPolicy: &gkev1.GKEIPAllocationPolicy{ 43 | UseIPAliases: true, 44 | }, 45 | ClusterAddons: &gkev1.GKEClusterAddons{ 46 | HTTPLoadBalancing: true, 47 | NetworkPolicyConfig: false, 48 | HorizontalPodAutoscaling: true, 49 | }, 50 | PrivateClusterConfig: &gkev1.GKEPrivateClusterConfig{ 51 | EnablePrivateEndpoint: false, 52 | EnablePrivateNodes: false, 53 | }, 54 | MasterAuthorizedNetworksConfig: &gkev1.GKEMasterAuthorizedNetworksConfig{ 55 | Enabled: false, 56 | }, 57 | }, 58 | } 59 | ) 60 | 61 | 
BeforeEach(func() { 62 | mockController = gomock.NewController(GinkgoT()) 63 | clusterServiceMock = mock_services.NewMockGKEClusterService(mockController) 64 | }) 65 | 66 | AfterEach(func() { 67 | mockController.Finish() 68 | }) 69 | 70 | It("should successfully create cluster", func() { 71 | createClusterRequest := NewClusterCreateRequest(config) 72 | clusterServiceMock.EXPECT(). 73 | ClusterCreate( 74 | ctx, 75 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone)), 76 | createClusterRequest). 77 | Return(&gkeapi.Operation{}, nil) 78 | 79 | clusterServiceMock.EXPECT(). 80 | ClusterList( 81 | ctx, 82 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone))). 83 | Return(&gkeapi.ListClustersResponse{}, nil) 84 | 85 | err := Create(ctx, clusterServiceMock, config) 86 | Expect(err).ToNot(HaveOccurred()) 87 | 88 | clusterServiceMock.EXPECT(). 89 | ClusterGet( 90 | ctx, 91 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), 92 | config.Spec.ClusterName)). 93 | Return( 94 | &gkeapi.Cluster{ 95 | Name: "test-cluster", 96 | }, nil) 97 | 98 | managedCluster, err := GetCluster(ctx, clusterServiceMock, &config.Spec) 99 | Expect(err).ToNot(HaveOccurred()) 100 | Expect(managedCluster.Name).To(Equal(config.Spec.ClusterName)) 101 | }) 102 | 103 | It("should successfully create cluster with customer managment encryption key", func() { 104 | config.Spec.CustomerManagedEncryptionKey = &gkev1.CMEKConfig{ 105 | KeyName: "test-key", 106 | RingName: "test-keyring", 107 | } 108 | createClusterRequest := NewClusterCreateRequest(config) 109 | clusterServiceMock.EXPECT(). 110 | ClusterCreate( 111 | ctx, 112 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone)), 113 | createClusterRequest). 114 | Return(&gkeapi.Operation{}, nil) 115 | 116 | clusterServiceMock.EXPECT(). 
117 | ClusterList( 118 | ctx, 119 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone))). 120 | Return(&gkeapi.ListClustersResponse{}, nil) 121 | 122 | err := Create(ctx, clusterServiceMock, config) 123 | Expect(err).ToNot(HaveOccurred()) 124 | 125 | clusterServiceMock.EXPECT(). 126 | ClusterGet( 127 | ctx, 128 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), 129 | config.Spec.ClusterName)). 130 | Return( 131 | &gkeapi.Cluster{ 132 | Name: "test-cluster", 133 | }, nil) 134 | 135 | managedCluster, err := GetCluster(ctx, clusterServiceMock, &config.Spec) 136 | Expect(err).ToNot(HaveOccurred()) 137 | Expect(managedCluster.Name).To(Equal(config.Spec.ClusterName)) 138 | }) 139 | 140 | It("should fail to create cluster", func() { 141 | clusterServiceMock.EXPECT(). 142 | ClusterList( 143 | ctx, 144 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone))). 145 | Return( 146 | &gkeapi.ListClustersResponse{ 147 | Clusters: []*gkeapi.Cluster{ 148 | { 149 | Name: "test-cluster", 150 | }, 151 | }, 152 | }, nil) 153 | 154 | err := Create(ctx, clusterServiceMock, config) 155 | Expect(err).To(HaveOccurred()) 156 | }) 157 | 158 | It("should successfully create autopilot cluster", func() { 159 | config.Spec.ClusterName = "test-autopilot-cluster" 160 | config.Spec.AutopilotConfig = &gkev1.GKEAutopilotConfig{ 161 | Enabled: true, 162 | } 163 | 164 | createClusterRequest := NewClusterCreateRequest(config) 165 | clusterServiceMock.EXPECT(). 166 | ClusterCreate( 167 | ctx, 168 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone)), 169 | createClusterRequest). 170 | Return(&gkeapi.Operation{}, nil) 171 | 172 | clusterServiceMock.EXPECT(). 173 | ClusterList( 174 | ctx, 175 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone))). 
176 | Return(&gkeapi.ListClustersResponse{}, nil) 177 | 178 | err := Create(ctx, clusterServiceMock, config) 179 | Expect(err).ToNot(HaveOccurred()) 180 | 181 | clusterServiceMock.EXPECT(). 182 | ClusterGet( 183 | ctx, 184 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), 185 | config.Spec.ClusterName)). 186 | Return( 187 | &gkeapi.Cluster{ 188 | Name: "test-autopilot-cluster", 189 | }, nil) 190 | 191 | managedCluster, err := GetCluster(ctx, clusterServiceMock, &config.Spec) 192 | Expect(err).ToNot(HaveOccurred()) 193 | Expect(managedCluster.Name).To(Equal(config.Spec.ClusterName)) 194 | }) 195 | 196 | It("should fail create cluster with customer managment encryption key", func() { 197 | config.Spec.CustomerManagedEncryptionKey = &gkev1.CMEKConfig{ 198 | KeyName: "test-key", 199 | } 200 | err := Create(ctx, clusterServiceMock, config) 201 | Expect(err).To(HaveOccurred()) 202 | }) 203 | 204 | It("should fail to create autopilot cluster with nodepools", func() { 205 | config.Spec.ClusterName = "test-autopilot-cluster" 206 | config.Spec.AutopilotConfig = &gkev1.GKEAutopilotConfig{ 207 | Enabled: true, 208 | } 209 | 210 | config.Spec.NodePools = []gkev1.GKENodePoolConfig{ 211 | { 212 | Name: &nodePoolName, 213 | InitialNodeCount: &initialNodeCount, 214 | Version: &k8sVersion, 215 | MaxPodsConstraint: &maxPodsConstraint, 216 | Config: &gkev1.GKENodeConfig{}, 217 | Autoscaling: &gkev1.GKENodePoolAutoscaling{ 218 | Enabled: true, 219 | MinNodeCount: 3, 220 | MaxNodeCount: 5, 221 | }, 222 | Management: &gkev1.GKENodePoolManagement{ 223 | AutoRepair: true, 224 | AutoUpgrade: true, 225 | }, 226 | }, 227 | } 228 | 229 | err := Create(ctx, clusterServiceMock, config) 230 | Expect(err).To(HaveOccurred()) 231 | }) 232 | 233 | It("should fail to create cluster with duplicated nodepool names", func() { 234 | config.Spec.NodePools = []gkev1.GKENodePoolConfig{ 235 | { 236 | Name: &nodePoolName, 237 | InitialNodeCount: &initialNodeCount, 238 | 
Version: &k8sVersion, 239 | MaxPodsConstraint: &maxPodsConstraint, 240 | Config: &gkev1.GKENodeConfig{}, 241 | Autoscaling: &gkev1.GKENodePoolAutoscaling{ 242 | Enabled: true, 243 | MinNodeCount: 3, 244 | MaxNodeCount: 5, 245 | }, 246 | Management: &gkev1.GKENodePoolManagement{ 247 | AutoRepair: true, 248 | AutoUpgrade: true, 249 | }, 250 | }, 251 | { 252 | Name: &nodePoolName, 253 | InitialNodeCount: &initialNodeCount, 254 | Version: &k8sVersion, 255 | MaxPodsConstraint: &maxPodsConstraint, 256 | Config: &gkev1.GKENodeConfig{}, 257 | Autoscaling: &gkev1.GKENodePoolAutoscaling{ 258 | Enabled: true, 259 | MinNodeCount: 3, 260 | MaxNodeCount: 5, 261 | }, 262 | Management: &gkev1.GKENodePoolManagement{ 263 | AutoRepair: true, 264 | AutoUpgrade: true, 265 | }, 266 | }, 267 | } 268 | err := Create(ctx, clusterServiceMock, config) 269 | Expect(err).To(HaveOccurred()) 270 | }) 271 | }) 272 | 273 | var _ = Describe("CreateNodePool", func() { 274 | var ( 275 | mockController *gomock.Controller 276 | clusterServiceMock *mock_services.MockGKEClusterService 277 | k8sVersion = "1.25.12-gke.200" 278 | clusterIpv4Cidr = "10.42.0.0/16" 279 | networkName = "test-network" 280 | subnetworkName = "test-subnetwork" 281 | emptyString = "" 282 | boolTrue = true 283 | 284 | nodePoolName = "test-node-pool" 285 | initialNodeCount = int64(3) 286 | maxPodsConstraint = int64(110) 287 | nodePoolConfig = &gkev1.GKENodePoolConfig{ 288 | Name: &nodePoolName, 289 | InitialNodeCount: &initialNodeCount, 290 | Version: &k8sVersion, 291 | MaxPodsConstraint: &maxPodsConstraint, 292 | Config: &gkev1.GKENodeConfig{}, 293 | Autoscaling: &gkev1.GKENodePoolAutoscaling{ 294 | Enabled: true, 295 | MinNodeCount: 3, 296 | MaxNodeCount: 5, 297 | }, 298 | Management: &gkev1.GKENodePoolManagement{ 299 | AutoRepair: true, 300 | AutoUpgrade: true, 301 | }, 302 | } 303 | 304 | config = &gkev1.GKEClusterConfig{ 305 | Spec: gkev1.GKEClusterConfigSpec{ 306 | Region: "test-region", 307 | ProjectID: "test-project", 308 | 
ClusterName: "test-cluster", 309 | Locations: []string{""}, 310 | Labels: map[string]string{"test": "test"}, 311 | ClusterIpv4CidrBlock: &clusterIpv4Cidr, 312 | KubernetesVersion: &k8sVersion, 313 | LoggingService: &emptyString, 314 | MonitoringService: &emptyString, 315 | EnableKubernetesAlpha: &boolTrue, 316 | Network: &networkName, 317 | Subnetwork: &subnetworkName, 318 | NetworkPolicyEnabled: &boolTrue, 319 | MaintenanceWindow: &emptyString, 320 | IPAllocationPolicy: &gkev1.GKEIPAllocationPolicy{ 321 | UseIPAliases: true, 322 | }, 323 | ClusterAddons: &gkev1.GKEClusterAddons{ 324 | HTTPLoadBalancing: true, 325 | NetworkPolicyConfig: false, 326 | HorizontalPodAutoscaling: true, 327 | }, 328 | PrivateClusterConfig: &gkev1.GKEPrivateClusterConfig{ 329 | EnablePrivateEndpoint: false, 330 | EnablePrivateNodes: false, 331 | }, 332 | MasterAuthorizedNetworksConfig: &gkev1.GKEMasterAuthorizedNetworksConfig{ 333 | Enabled: false, 334 | }, 335 | }, 336 | } 337 | ) 338 | 339 | BeforeEach(func() { 340 | mockController = gomock.NewController(GinkgoT()) 341 | clusterServiceMock = mock_services.NewMockGKEClusterService(mockController) 342 | }) 343 | 344 | AfterEach(func() { 345 | mockController.Finish() 346 | }) 347 | 348 | It("should successfully create cluster and node pool", func() { 349 | createClusterRequest := NewClusterCreateRequest(config) 350 | clusterServiceMock.EXPECT(). 351 | ClusterCreate( 352 | ctx, 353 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone)), 354 | createClusterRequest). 355 | Return(&gkeapi.Operation{}, nil) 356 | 357 | clusterServiceMock.EXPECT(). 358 | ClusterList( 359 | ctx, 360 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone))). 
361 | Return(&gkeapi.ListClustersResponse{}, nil) 362 | 363 | err := Create(ctx, clusterServiceMock, config) 364 | Expect(err).ToNot(HaveOccurred()) 365 | 366 | createNodePoolRequest, err := newNodePoolCreateRequest(nodePoolConfig, config) 367 | Expect(err).ToNot(HaveOccurred()) 368 | clusterServiceMock.EXPECT(). 369 | NodePoolCreate( 370 | ctx, 371 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), config.Spec.ClusterName), 372 | createNodePoolRequest). 373 | Return(&gkeapi.Operation{}, nil) 374 | 375 | status, err := CreateNodePool(ctx, clusterServiceMock, config, nodePoolConfig) 376 | Expect(err).ToNot(HaveOccurred()) 377 | Expect(status).To(Equal(Changed)) 378 | }) 379 | It("shouldn't successfully create cluster and node pool", func() { 380 | testNodePoolConfig := &gkev1.GKENodePoolConfig{} 381 | status, err := CreateNodePool(ctx, clusterServiceMock, config, testNodePoolConfig) 382 | Expect(err).To(HaveOccurred()) 383 | Expect(status).To(Equal(NotChanged)) 384 | }) 385 | }) 386 | -------------------------------------------------------------------------------- /pkg/gke/delete.go: -------------------------------------------------------------------------------- 1 | package gke 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "time" 7 | 8 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 9 | "github.com/rancher/gke-operator/pkg/gke/services" 10 | "k8s.io/apimachinery/pkg/util/wait" 11 | ) 12 | 13 | const ( 14 | waitSec = 30 15 | backoffSteps = 12 16 | ) 17 | 18 | var backoff = wait.Backoff{ 19 | Duration: waitSec * time.Second, 20 | Steps: backoffSteps, 21 | } 22 | 23 | // RemoveCluster attempts to delete a cluster and retries the delete request if the cluster is busy. 
24 | func RemoveCluster(ctx context.Context, gkeClient services.GKEClusterService, config *gkev1.GKEClusterConfig) error { 25 | return wait.ExponentialBackoff(backoff, func() (bool, error) { 26 | _, err := gkeClient.ClusterDelete(ctx, 27 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), config.Spec.ClusterName)) 28 | 29 | // Cluster is busy with another operation: not done yet, retry on the next backoff step. 30 | if err != nil && strings.Contains(err.Error(), errWait) { 31 | return false, nil 32 | } 33 | // Cluster already gone: treat as successfully deleted. 34 | if err != nil && strings.Contains(err.Error(), errNotFound) { 35 | return true, nil 36 | } 37 | // Any other error is fatal and aborts the backoff loop. 38 | if err != nil { 39 | return false, err 40 | } 41 | return true, nil 42 | }) 43 | } 44 | 45 | // RemoveNodePool deletes a node pool. Unlike RemoveCluster it does not retry internally; it reports Retry so the caller can requeue when the provider says the resource is busy. 46 | func RemoveNodePool(ctx context.Context, gkeClient services.GKEClusterService, config *gkev1.GKEClusterConfig, nodePoolName string) (Status, error) { 47 | _, err := gkeClient.NodePoolDelete(ctx, 48 | NodePoolRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), config.Spec.ClusterName, nodePoolName)) 49 | // Busy: ask the caller to try again later. 50 | if err != nil && strings.Contains(err.Error(), errWait) { 51 | return Retry, nil 52 | } 53 | // Already absent: nothing to change. 54 | if err != nil && strings.Contains(err.Error(), errNotFound) { 55 | return NotChanged, nil 56 | } 57 | if err != nil { 58 | return NotChanged, err 59 | } 60 | return Changed, nil 61 | } 62 | -------------------------------------------------------------------------------- /pkg/gke/delete_test.go: -------------------------------------------------------------------------------- 1 | package gke 2 | 3 | import ( 4 | "github.com/golang/mock/gomock" 5 | . "github.com/onsi/ginkgo/v2" 6 | .
"github.com/onsi/gomega" 7 | 8 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 9 | "github.com/rancher/gke-operator/pkg/gke/services/mock_services" 10 | gkeapi "google.golang.org/api/container/v1" 11 | ) 12 | 13 | var _ = Describe("RemoveCluster", func() { 14 | var ( 15 | mockController *gomock.Controller 16 | clusterServiceMock *mock_services.MockGKEClusterService 17 | k8sVersion = "1.25.12-gke.200" 18 | clusterIpv4Cidr = "10.42.0.0/16" 19 | networkName = "test-network" 20 | subnetworkName = "test-subnetwork" 21 | emptyString = "" 22 | boolTrue = true 23 | config = &gkev1.GKEClusterConfig{ 24 | Spec: gkev1.GKEClusterConfigSpec{ 25 | Region: "test-region", 26 | ProjectID: "test-project", 27 | ClusterName: "test-cluster", 28 | Locations: []string{""}, 29 | Labels: map[string]string{"test": "test"}, 30 | ClusterIpv4CidrBlock: &clusterIpv4Cidr, 31 | KubernetesVersion: &k8sVersion, 32 | LoggingService: &emptyString, 33 | MonitoringService: &emptyString, 34 | EnableKubernetesAlpha: &boolTrue, 35 | Network: &networkName, 36 | Subnetwork: &subnetworkName, 37 | NetworkPolicyEnabled: &boolTrue, 38 | MaintenanceWindow: &emptyString, 39 | IPAllocationPolicy: &gkev1.GKEIPAllocationPolicy{ 40 | UseIPAliases: true, 41 | }, 42 | ClusterAddons: &gkev1.GKEClusterAddons{ 43 | HTTPLoadBalancing: true, 44 | NetworkPolicyConfig: false, 45 | HorizontalPodAutoscaling: true, 46 | }, 47 | PrivateClusterConfig: &gkev1.GKEPrivateClusterConfig{ 48 | EnablePrivateEndpoint: false, 49 | EnablePrivateNodes: false, 50 | }, 51 | MasterAuthorizedNetworksConfig: &gkev1.GKEMasterAuthorizedNetworksConfig{ 52 | Enabled: false, 53 | }, 54 | }, 55 | } 56 | ) 57 | 58 | BeforeEach(func() { 59 | mockController = gomock.NewController(GinkgoT()) 60 | clusterServiceMock = mock_services.NewMockGKEClusterService(mockController) 61 | }) 62 | 63 | AfterEach(func() { 64 | mockController.Finish() 65 | }) 66 | 67 | It("should successfully remove cluster", func() { 68 | createClusterRequest := 
NewClusterCreateRequest(config) 69 | clusterServiceMock.EXPECT(). 70 | ClusterCreate( 71 | ctx, 72 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone)), 73 | createClusterRequest). 74 | Return(&gkeapi.Operation{}, nil) 75 | 76 | clusterServiceMock.EXPECT(). 77 | ClusterList( 78 | ctx, 79 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone))). 80 | Return(&gkeapi.ListClustersResponse{}, nil) 81 | 82 | err := Create(ctx, clusterServiceMock, config) 83 | Expect(err).ToNot(HaveOccurred()) 84 | 85 | clusterServiceMock.EXPECT(). 86 | ClusterGet( 87 | ctx, 88 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), 89 | config.Spec.ClusterName)). 90 | Return( 91 | &gkeapi.Cluster{ 92 | Name: "test-cluster", 93 | }, nil) 94 | 95 | managedCluster, err := GetCluster(ctx, clusterServiceMock, &config.Spec) 96 | Expect(err).ToNot(HaveOccurred()) 97 | Expect(managedCluster.Name).To(Equal(config.Spec.ClusterName)) 98 | 99 | clusterServiceMock.EXPECT(). 100 | ClusterDelete( 101 | ctx, 102 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), config.Spec.ClusterName)). 
103 | Return(&gkeapi.Operation{}, nil) 104 | 105 | err = RemoveCluster(ctx, clusterServiceMock, config) 106 | Expect(err).ToNot(HaveOccurred()) 107 | }) 108 | }) 109 | 110 | var _ = Describe("RemoveNodePool", func() { 111 | var ( 112 | mockController *gomock.Controller 113 | clusterServiceMock *mock_services.MockGKEClusterService 114 | k8sVersion = "1.25.12-gke.200" 115 | clusterIpv4Cidr = "10.42.0.0/16" 116 | networkName = "test-network" 117 | subnetworkName = "test-subnetwork" 118 | emptyString = "" 119 | boolTrue = true 120 | 121 | nodePoolName = "test-node-pool" 122 | initialNodeCount = int64(3) 123 | maxPodsConstraint = int64(110) 124 | nodePoolConfig = &gkev1.GKENodePoolConfig{ 125 | Name: &nodePoolName, 126 | InitialNodeCount: &initialNodeCount, 127 | Version: &k8sVersion, 128 | MaxPodsConstraint: &maxPodsConstraint, 129 | Config: &gkev1.GKENodeConfig{}, 130 | Autoscaling: &gkev1.GKENodePoolAutoscaling{ 131 | Enabled: true, 132 | MinNodeCount: 3, 133 | MaxNodeCount: 5, 134 | }, 135 | Management: &gkev1.GKENodePoolManagement{ 136 | AutoRepair: true, 137 | AutoUpgrade: true, 138 | }, 139 | } 140 | 141 | config = &gkev1.GKEClusterConfig{ 142 | Spec: gkev1.GKEClusterConfigSpec{ 143 | Region: "test-region", 144 | ProjectID: "test-project", 145 | ClusterName: "test-cluster", 146 | Locations: []string{""}, 147 | Labels: map[string]string{"test": "test"}, 148 | ClusterIpv4CidrBlock: &clusterIpv4Cidr, 149 | KubernetesVersion: &k8sVersion, 150 | LoggingService: &emptyString, 151 | MonitoringService: &emptyString, 152 | EnableKubernetesAlpha: &boolTrue, 153 | Network: &networkName, 154 | Subnetwork: &subnetworkName, 155 | NetworkPolicyEnabled: &boolTrue, 156 | MaintenanceWindow: &emptyString, 157 | IPAllocationPolicy: &gkev1.GKEIPAllocationPolicy{ 158 | UseIPAliases: true, 159 | }, 160 | ClusterAddons: &gkev1.GKEClusterAddons{ 161 | HTTPLoadBalancing: true, 162 | NetworkPolicyConfig: false, 163 | HorizontalPodAutoscaling: true, 164 | }, 165 | PrivateClusterConfig: 
&gkev1.GKEPrivateClusterConfig{ 166 | EnablePrivateEndpoint: false, 167 | EnablePrivateNodes: false, 168 | }, 169 | MasterAuthorizedNetworksConfig: &gkev1.GKEMasterAuthorizedNetworksConfig{ 170 | Enabled: false, 171 | }, 172 | }, 173 | } 174 | ) 175 | 176 | BeforeEach(func() { 177 | mockController = gomock.NewController(GinkgoT()) 178 | clusterServiceMock = mock_services.NewMockGKEClusterService(mockController) 179 | }) 180 | 181 | AfterEach(func() { 182 | mockController.Finish() 183 | }) 184 | 185 | It("should successfully remove node pool", func() { 186 | createClusterRequest := NewClusterCreateRequest(config) 187 | clusterServiceMock.EXPECT(). 188 | ClusterCreate( 189 | ctx, 190 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone)), 191 | createClusterRequest). 192 | Return(&gkeapi.Operation{}, nil) 193 | 194 | clusterServiceMock.EXPECT(). 195 | ClusterList( 196 | ctx, 197 | LocationRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone))). 198 | Return(&gkeapi.ListClustersResponse{}, nil) 199 | 200 | err := Create(ctx, clusterServiceMock, config) 201 | Expect(err).ToNot(HaveOccurred()) 202 | 203 | createNodePoolRequest, err := newNodePoolCreateRequest(nodePoolConfig, config) 204 | Expect(err).ToNot(HaveOccurred()) 205 | clusterServiceMock.EXPECT(). 206 | NodePoolCreate( 207 | ctx, 208 | ClusterRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), config.Spec.ClusterName), 209 | createNodePoolRequest). 210 | Return(&gkeapi.Operation{}, nil) 211 | 212 | status, err := CreateNodePool(ctx, clusterServiceMock, config, nodePoolConfig) 213 | Expect(err).ToNot(HaveOccurred()) 214 | Expect(status).To(Equal(Changed)) 215 | 216 | clusterServiceMock.EXPECT(). 217 | NodePoolDelete( 218 | ctx, 219 | NodePoolRRN(config.Spec.ProjectID, Location(config.Spec.Region, config.Spec.Zone), config.Spec.ClusterName, nodePoolName)). 
220 | Return(&gkeapi.Operation{}, nil) 221 | 222 | status, err = RemoveNodePool(ctx, clusterServiceMock, config, nodePoolName) 223 | Expect(err).ToNot(HaveOccurred()) 224 | Expect(status).To(Equal(Changed)) 225 | }) 226 | }) 227 | -------------------------------------------------------------------------------- /pkg/gke/relative_resource_name.go: -------------------------------------------------------------------------------- 1 | package gke 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // Location returns the region or zone depending on which is not set to empty. 8 | // Cluster creation validation should ensure that only one of region or zone is set, not both. 9 | func Location(region, zone string) string { 10 | ret := region 11 | if zone != "" { 12 | ret = zone 13 | } 14 | return ret 15 | } 16 | 17 | // LocationRRN returns a Relative Resource Name representing a location. This 18 | // RRN can either represent a Region or a Zone. It can be used as the parent 19 | // attribute during cluster creation to create a zonal or regional cluster, or 20 | // be used to generate more specific RRNs like an RRN representing a cluster. 
21 | // 22 | // https://cloud.google.com/apis/design/resource_names#relative_resource_name 23 | func LocationRRN(projectID, location string) string { 24 | return fmt.Sprintf("projects/%s/locations/%s", projectID, location) 25 | } 26 | 27 | // ClusterRRN returns an Relative Resource Name of a cluster in the specified 28 | // region or zone 29 | func ClusterRRN(projectID, location, clusterName string) string { 30 | return fmt.Sprintf("%s/clusters/%s", LocationRRN(projectID, location), clusterName) 31 | } 32 | 33 | // NodePoolRRN returns a Relative Resource Name of a node pool in a cluster in the 34 | // region or zone for the specified project 35 | func NodePoolRRN(projectID, location, clusterName, nodePool string) string { 36 | return fmt.Sprintf("%s/nodePools/%s", ClusterRRN(projectID, location, clusterName), nodePool) 37 | } 38 | 39 | // BootDiskRRN returns a Relative Resource Name of a disk key in the region or zone for the 40 | // specified project 41 | func BootDiskRRN(projectID, location, ringName, keyName string) string { 42 | return fmt.Sprintf("projects/%s/locations/%s/keyRings/%s/cryptoKeys/%s", projectID, location, ringName, keyName) 43 | } 44 | -------------------------------------------------------------------------------- /pkg/gke/services/gke.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "context" 5 | 6 | "golang.org/x/oauth2" 7 | gkeapi "google.golang.org/api/container/v1" 8 | "google.golang.org/api/option" 9 | ) 10 | 11 | type GKEClusterService interface { 12 | ClusterCreate(ctx context.Context, parent string, createclusterrequest *gkeapi.CreateClusterRequest) (*gkeapi.Operation, error) 13 | ClusterList(ctx context.Context, parent string) (*gkeapi.ListClustersResponse, error) 14 | ClusterGet(ctx context.Context, name string) (*gkeapi.Cluster, error) 15 | ClusterUpdate(ctx context.Context, name string, updateclusterrequest *gkeapi.UpdateClusterRequest) 
(*gkeapi.Operation, error) 16 | ClusterDelete(ctx context.Context, name string) (*gkeapi.Operation, error) 17 | SetNetworkPolicy(ctx context.Context, name string, networkpolicyrequest *gkeapi.SetNetworkPolicyRequest) (*gkeapi.Operation, error) 18 | SetMaintenancePolicy(ctx context.Context, name string, maintenancepolicyrequest *gkeapi.SetMaintenancePolicyRequest) (*gkeapi.Operation, error) 19 | SetResourceLabels(ctx context.Context, name string, resourcelabelsrequest *gkeapi.SetLabelsRequest) (*gkeapi.Operation, error) 20 | NodePoolCreate(ctx context.Context, parent string, createnodepoolrequest *gkeapi.CreateNodePoolRequest) (*gkeapi.Operation, error) 21 | NodePoolList(ctx context.Context, parent string) (*gkeapi.ListNodePoolsResponse, error) 22 | NodePoolGet(ctx context.Context, name string) (*gkeapi.NodePool, error) 23 | NodePoolUpdate(ctx context.Context, name string, updatenodepoolrequest *gkeapi.UpdateNodePoolRequest) (*gkeapi.Operation, error) 24 | NodePoolDelete(ctx context.Context, name string) (*gkeapi.Operation, error) 25 | SetSize(ctx context.Context, name string, setnodepoolsizerequest *gkeapi.SetNodePoolSizeRequest) (*gkeapi.Operation, error) 26 | SetAutoscaling(ctx context.Context, name string, setnodepoolautoscalingrequest *gkeapi.SetNodePoolAutoscalingRequest) (*gkeapi.Operation, error) 27 | SetManagement(ctx context.Context, name string, setnodepoolmanagementrequest *gkeapi.SetNodePoolManagementRequest) (*gkeapi.Operation, error) 28 | } 29 | 30 | type gkeClusterService struct { 31 | svc gkeapi.Service 32 | } 33 | 34 | func NewGKEClusterService(ctx context.Context, ts oauth2.TokenSource) (GKEClusterService, error) { 35 | svc, err := gkeapi.NewService(ctx, option.WithHTTPClient(oauth2.NewClient(ctx, ts))) 36 | if err != nil { 37 | return nil, err 38 | } 39 | return &gkeClusterService{ 40 | svc: *svc, 41 | }, nil 42 | } 43 | 44 | func (g *gkeClusterService) ClusterCreate(ctx context.Context, parent string, createclusterrequest 
*gkeapi.CreateClusterRequest) (*gkeapi.Operation, error) { 45 | return g.svc.Projects.Locations.Clusters.Create(parent, createclusterrequest).Context(ctx).Do() 46 | } 47 | 48 | func (g *gkeClusterService) ClusterList(ctx context.Context, parent string) (*gkeapi.ListClustersResponse, error) { 49 | return g.svc.Projects.Locations.Clusters.List(parent).Context(ctx).Do() 50 | } 51 | 52 | func (g *gkeClusterService) ClusterGet(ctx context.Context, name string) (*gkeapi.Cluster, error) { 53 | return g.svc.Projects.Locations.Clusters.Get(name).Context(ctx).Do() 54 | } 55 | 56 | func (g *gkeClusterService) ClusterUpdate(ctx context.Context, name string, updateclusterrequest *gkeapi.UpdateClusterRequest) (*gkeapi.Operation, error) { 57 | return g.svc.Projects.Locations.Clusters.Update(name, updateclusterrequest).Context(ctx).Do() 58 | } 59 | 60 | func (g *gkeClusterService) ClusterDelete(ctx context.Context, name string) (*gkeapi.Operation, error) { 61 | return g.svc.Projects.Locations.Clusters.Delete(name).Context(ctx).Do() 62 | } 63 | 64 | func (g *gkeClusterService) SetNetworkPolicy(ctx context.Context, name string, networkpolicyrequest *gkeapi.SetNetworkPolicyRequest) (*gkeapi.Operation, error) { 65 | return g.svc.Projects.Locations.Clusters.SetNetworkPolicy(name, networkpolicyrequest).Context(ctx).Do() 66 | } 67 | 68 | func (g *gkeClusterService) SetMaintenancePolicy(ctx context.Context, name string, maintenancepolicyrequest *gkeapi.SetMaintenancePolicyRequest) (*gkeapi.Operation, error) { 69 | return g.svc.Projects.Locations.Clusters.SetMaintenancePolicy(name, maintenancepolicyrequest).Context(ctx).Do() 70 | } 71 | 72 | func (g *gkeClusterService) SetResourceLabels(ctx context.Context, name string, resourcelabelsrequest *gkeapi.SetLabelsRequest) (*gkeapi.Operation, error) { 73 | return g.svc.Projects.Locations.Clusters.SetResourceLabels(name, resourcelabelsrequest).Context(ctx).Do() 74 | } 75 | 76 | func (g *gkeClusterService) NodePoolCreate(ctx context.Context, 
parent string, createnodepoolrequest *gkeapi.CreateNodePoolRequest) (*gkeapi.Operation, error) { 77 | return g.svc.Projects.Locations.Clusters.NodePools.Create(parent, createnodepoolrequest).Context(ctx).Do() 78 | } 79 | 80 | func (g *gkeClusterService) NodePoolList(ctx context.Context, parent string) (*gkeapi.ListNodePoolsResponse, error) { 81 | return g.svc.Projects.Locations.Clusters.NodePools.List(parent).Context(ctx).Do() 82 | } 83 | 84 | func (g *gkeClusterService) NodePoolGet(ctx context.Context, name string) (*gkeapi.NodePool, error) { 85 | return g.svc.Projects.Locations.Clusters.NodePools.Get(name).Context(ctx).Do() 86 | } 87 | 88 | func (g *gkeClusterService) NodePoolUpdate(ctx context.Context, name string, updatenodepoolrequest *gkeapi.UpdateNodePoolRequest) (*gkeapi.Operation, error) { 89 | return g.svc.Projects.Locations.Clusters.NodePools.Update(name, updatenodepoolrequest).Context(ctx).Do() 90 | } 91 | 92 | func (g *gkeClusterService) NodePoolDelete(ctx context.Context, name string) (*gkeapi.Operation, error) { 93 | return g.svc.Projects.Locations.Clusters.NodePools.Delete(name).Context(ctx).Do() 94 | } 95 | 96 | func (g *gkeClusterService) SetSize(ctx context.Context, name string, setnodepoolsizerequest *gkeapi.SetNodePoolSizeRequest) (*gkeapi.Operation, error) { 97 | return g.svc.Projects.Locations.Clusters.NodePools.SetSize(name, setnodepoolsizerequest).Context(ctx).Do() 98 | } 99 | 100 | func (g *gkeClusterService) SetAutoscaling(ctx context.Context, name string, setnodepoolautoscalingrequest *gkeapi.SetNodePoolAutoscalingRequest) (*gkeapi.Operation, error) { 101 | return g.svc.Projects.Locations.Clusters.NodePools.SetAutoscaling(name, setnodepoolautoscalingrequest).Context(ctx).Do() 102 | } 103 | 104 | func (g *gkeClusterService) SetManagement(ctx context.Context, name string, setnodepoolmanagementrequest *gkeapi.SetNodePoolManagementRequest) (*gkeapi.Operation, error) { 105 | return 
g.svc.Projects.Locations.Clusters.NodePools.SetManagement(name, setnodepoolmanagementrequest).Context(ctx).Do() 106 | } 107 | -------------------------------------------------------------------------------- /pkg/gke/services/mock_services/doc.go: -------------------------------------------------------------------------------- 1 | package mock_services 2 | 3 | // Run go generate to regenerate this mock. 4 | // 5 | //go:generate ../../../../bin/mockgen -destination gke_mock.go -package mock_services -source ../gke.go GKEClusterServiceInerface,GKENodePoolServiceInerface 6 | -------------------------------------------------------------------------------- /pkg/gke/suite_test.go: -------------------------------------------------------------------------------- 1 | package gke 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo/v2" 7 | . "github.com/onsi/gomega" 8 | "golang.org/x/net/context" 9 | ) 10 | 11 | var ( 12 | ctx context.Context 13 | ) 14 | 15 | func TestAPIs(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | RunSpecs(t, "GKE services Suite") 18 | } 19 | -------------------------------------------------------------------------------- /pkg/test/cleanup.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | "github.com/pkg/errors" 8 | corev1 "k8s.io/api/core/v1" 9 | apierrors "k8s.io/apimachinery/pkg/api/errors" 10 | kerrors "k8s.io/apimachinery/pkg/util/errors" 11 | "k8s.io/apimachinery/pkg/util/wait" 12 | runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" 13 | ) 14 | 15 | var ( 16 | cacheSyncBackoff = wait.Backoff{ 17 | Duration: 100 * time.Millisecond, 18 | Factor: 1.5, 19 | Steps: 8, 20 | Jitter: 0.4, 21 | } 22 | ) 23 | 24 | // CleanupAndWait deletes all the given objects and waits for the cache to be updated accordingly. 
25 | func CleanupAndWait(ctx context.Context, cl runtimeclient.Client, objs ...runtimeclient.Object) error { 26 | if err := cleanup(ctx, cl, objs...); err != nil { 27 | return err 28 | } 29 | 30 | // Makes sure the cache is updated with the deleted object 31 | errs := []error{} 32 | for _, o := range objs { 33 | // Ignoring namespaces because in testenv the namespace cleaner is not running. 34 | if o.GetObjectKind().GroupVersionKind().GroupKind() == corev1.SchemeGroupVersion.WithKind("Namespace").GroupKind() { 35 | continue 36 | } 37 | 38 | oCopy := o.DeepCopyObject().(runtimeclient.Object) 39 | key := runtimeclient.ObjectKeyFromObject(o) 40 | err := wait.ExponentialBackoff( 41 | cacheSyncBackoff, 42 | func() (done bool, err error) { 43 | if err := cl.Get(ctx, key, oCopy); err != nil { 44 | if apierrors.IsNotFound(err) { 45 | return true, nil 46 | } 47 | if o.GetName() == "" { // resource is being deleted 48 | return true, nil 49 | } 50 | return false, err 51 | } 52 | return false, nil 53 | }) 54 | errs = append(errs, errors.Wrapf(err, "key %s, %s is not being deleted from the testenv client cache", o.GetObjectKind().GroupVersionKind().String(), key)) 55 | } 56 | return kerrors.NewAggregate(errs) 57 | } 58 | 59 | // cleanup deletes all the given objects. 
60 | func cleanup(ctx context.Context, cl runtimeclient.Client, objs ...runtimeclient.Object) error { 61 | errs := []error{} 62 | for _, o := range objs { 63 | copyObj := o.DeepCopyObject().(runtimeclient.Object) 64 | 65 | if err := cl.Get(ctx, runtimeclient.ObjectKeyFromObject(o), copyObj); err != nil { 66 | if apierrors.IsNotFound(err) { 67 | continue 68 | } 69 | if o.GetName() == "" { // resource is being deleted 70 | continue 71 | } 72 | errs = append(errs, err) 73 | continue 74 | } 75 | 76 | // Remove finalizers from the object 77 | if copyObj.GetFinalizers() != nil { 78 | copyObj.SetFinalizers(nil) 79 | } 80 | 81 | err := cl.Update(ctx, copyObj) 82 | if apierrors.IsNotFound(err) { 83 | continue 84 | } 85 | errs = append(errs, err) 86 | 87 | err = cl.Delete(ctx, copyObj) 88 | if apierrors.IsNotFound(err) { 89 | continue 90 | } 91 | errs = append(errs, err) 92 | } 93 | return kerrors.NewAggregate(errs) 94 | } 95 | -------------------------------------------------------------------------------- /pkg/test/envtest.go: -------------------------------------------------------------------------------- 1 | package test 2 | 3 | import ( 4 | "errors" 5 | "path" 6 | goruntime "runtime" 7 | 8 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 9 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 12 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 13 | "k8s.io/client-go/rest" 14 | runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/envtest" 16 | ) 17 | 18 | var ( 19 | scheme = runtime.NewScheme() 20 | ) 21 | 22 | func init() { 23 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 24 | utilruntime.Must(gkev1.AddToScheme(scheme)) 25 | } 26 | 27 | func StartEnvTest(testEnv *envtest.Environment) (*rest.Config, runtimeclient.Client, error) { 28 | // Get the root of the current file to use 
in CRD paths. 29 | _, filename, _, _ := goruntime.Caller(0) //nolint:dogsled 30 | root := path.Join(path.Dir(filename), "..", "..", "..", "gke-operator") 31 | 32 | testEnv.CRDs = []*apiextensionsv1.CustomResourceDefinition{ 33 | // Add later if needed. 34 | } 35 | testEnv.CRDDirectoryPaths = []string{ 36 | path.Join(root, "charts", "gke-operator-crd", "templates"), 37 | } 38 | testEnv.ErrorIfCRDPathMissing = true 39 | 40 | cfg, err := testEnv.Start() 41 | if err != nil { 42 | return nil, nil, err 43 | } 44 | 45 | if cfg == nil { 46 | return nil, nil, errors.New("envtest.Environment.Start() returned nil config") 47 | } 48 | 49 | cl, err := runtimeclient.New(cfg, runtimeclient.Options{Scheme: scheme}) 50 | if err != nil { 51 | return nil, nil, err 52 | } 53 | 54 | return cfg, cl, nil 55 | } 56 | 57 | func StopEnvTest(testEnv *envtest.Environment) error { 58 | return testEnv.Stop() 59 | } 60 | -------------------------------------------------------------------------------- /pkg/utils/parse.go: -------------------------------------------------------------------------------- 1 | package utils 2 | 3 | // StringValue returns the value of the string pointer passed in or 4 | // "" if the pointer is nil. 5 | func StringValue(v *string) string { 6 | if v != nil { 7 | return *v 8 | } 9 | return "" 10 | } 11 | -------------------------------------------------------------------------------- /pkg/version/version.go: -------------------------------------------------------------------------------- 1 | package version 2 | 3 | var ( 4 | GitCommit string 5 | Version string 6 | ) 7 | -------------------------------------------------------------------------------- /scripts/build: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source $(dirname $0)/version 4 | 5 | cd $(dirname $0)/.. 
6 | 7 | mkdir -p bin 8 | if [ "$(uname)" = "Linux" ]; then 9 | OTHER_LINKFLAGS="-extldflags -static -s" 10 | fi 11 | CGO_ENABLED=0 go build -ldflags "$OTHER_LINKFLAGS" -o bin/gke-operator 12 | -------------------------------------------------------------------------------- /scripts/ci: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd $(dirname $0) 6 | 7 | ./validate 8 | ./build 9 | ./package 10 | -------------------------------------------------------------------------------- /scripts/go_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | if [ -z "${1}" ]; then 8 | echo "must provide module as first parameter" 9 | exit 1 10 | fi 11 | 12 | if [ -z "${2}" ]; then 13 | echo "must provide binary name as second parameter" 14 | exit 1 15 | fi 16 | 17 | if [ -z "${3}" ]; then 18 | echo "must provide version as third parameter" 19 | exit 1 20 | fi 21 | 22 | if [ -z "${GOBIN}" ]; then 23 | echo "GOBIN is not set. Must set GOBIN to install the bin in a specified directory." 24 | exit 1 25 | fi 26 | 27 | rm "${GOBIN}/${2}"* 2> /dev/null || true 28 | 29 | # install the golang module specified as the first argument 30 | go install -tags tools "${1}@${3}" 31 | mv "${GOBIN}/${2}" "${GOBIN}/${2}-${3}" 32 | ln -sf "${GOBIN}/${2}-${3}" "${GOBIN}/${2}" -------------------------------------------------------------------------------- /scripts/package: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | source $(dirname $0)/version 5 | 6 | cd $(dirname $0)/.. 
7 | 8 | mkdir -p dist/artifacts 9 | cp bin/gke-operator dist/artifacts/gke-operator-linux${SUFFIX} 10 | for i in bin/gke-operator-*; do 11 | if [ -e "$i" ]; then 12 | cp $i dist/artifacts 13 | fi 14 | done 15 | 16 | ./scripts/package-helm -------------------------------------------------------------------------------- /scripts/package-helm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if ! hash helm 2>/dev/null; then 5 | exit 0 6 | fi 7 | 8 | cd $(dirname $0)/.. 9 | . ./scripts/version 10 | 11 | rm -rf build/charts 12 | mkdir -p build dist/artifacts 13 | cp -rf charts build/ 14 | 15 | sed -i \ 16 | -e 's/^version:.*/version: '${HELM_VERSION}'/' \ 17 | -e 's/appVersion:.*/appVersion: '${HELM_VERSION}'/' \ 18 | build/charts/gke-operator/Chart.yaml 19 | 20 | sed -i \ 21 | -e 's/tag:.*/tag: '${HELM_TAG}'/' \ 22 | build/charts/gke-operator/values.yaml 23 | 24 | sed -i \ 25 | -e 's/^version:.*/version: '${HELM_VERSION}'/' \ 26 | -e 's/appVersion:.*/appVersion: '${HELM_VERSION}'/' \ 27 | build/charts/gke-operator-crd/Chart.yaml 28 | 29 | helm package -d ./dist/artifacts ./build/charts/gke-operator 30 | helm package -d ./dist/artifacts ./build/charts/gke-operator-crd 31 | -------------------------------------------------------------------------------- /scripts/setup-kind-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | KUBE_VERSION="${KUBE_VERSION:-v1.32.2}" 6 | CLUSTER_NAME="${CLUSTER_NAME:-gke-operator-e2e}" 7 | 8 | if ! 
kind get clusters | grep "$CLUSTER_NAME"; then 9 | cat << EOF > kind.config 10 | kind: Cluster 11 | apiVersion: kind.x-k8s.io/v1alpha4 12 | nodes: 13 | - role: control-plane 14 | image: kindest/node:$KUBE_VERSION 15 | kubeadmConfigPatches: 16 | - | 17 | kind: InitConfiguration 18 | nodeRegistration: 19 | kubeletExtraArgs: 20 | node-labels: "ingress-ready=true" 21 | EOF 22 | kind create cluster --name $CLUSTER_NAME --config kind.config 23 | rm -rf kind.config 24 | fi 25 | 26 | kubectl cluster-info --context kind-$CLUSTER_NAME 27 | echo "Sleep to give times to node to populate with all info" 28 | kubectl wait --for=condition=Ready node/$CLUSTER_NAME-control-plane 29 | # Label the nodes with node-role.kubernetes.io/master as it appears that 30 | # label is no longer added on >=1.24.X clusters while it was set on <=1.23.X 31 | # https://github.com/kubernetes/enhancements/tree/master/keps/sig-cluster-lifecycle/kubeadm/2067-rename-master-label-taint 32 | # https://kubernetes.io/blog/2022/04/07/upcoming-changes-in-kubernetes-1-24/#api-removals-deprecations-and-other-changes-for-kubernetes-1-24 33 | # system-upgrade-controller 0.9.1 still uses it to schedule pods 34 | kubectl label nodes --all node-role.kubernetes.io/master= 35 | kubectl get nodes -o wide -------------------------------------------------------------------------------- /scripts/validate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd $(dirname $0)/.. 6 | 7 | if ! 
command -v golangci-lint; then 8 | echo Skipping validation: no golangci-lint available 9 | exit 10 | fi 11 | 12 | echo 'Running: golangci-lint' 13 | golangci-lint run 14 | 15 | echo 'Running: go mod verify' 16 | go mod verify 17 | 18 | echo 'Running: go fmt' 19 | go fmt 20 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 21 | echo 'go fmt produced differences' 22 | exit 1 23 | fi 24 | 25 | echo 'Running: go generate' 26 | go generate 27 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 28 | echo 'go generate produced differences' 29 | exit 1 30 | fi 31 | 32 | echo 'Running: go mod tidy' 33 | go mod tidy 34 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 35 | echo 'go mod tidy produced differences' 36 | exit 1 37 | fi 38 | -------------------------------------------------------------------------------- /scripts/version: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -n "$(git status --porcelain --untracked-files=no)" ]; then 4 | DIRTY="-dirty" 5 | fi 6 | 7 | COMMIT=$(git rev-parse --short HEAD) 8 | GIT_TAG=${DRONE_TAG:-$(git tag -l --contains HEAD | head -n 1)} 9 | 10 | if [[ -z "$DIRTY" && -n "$GIT_TAG" ]]; then 11 | VERSION=$GIT_TAG 12 | else 13 | VERSION="${COMMIT}${DIRTY}" 14 | fi 15 | 16 | if [ -z "$ARCH" ]; then 17 | ARCH=$(go env GOHOSTARCH) 18 | fi 19 | 20 | SUFFIX="-${ARCH}" 21 | 22 | HELM_TAG=${TAG:-${VERSION}} 23 | HELM_VERSION=${HELM_TAG/v/} 24 | TAG=${TAG:-${VERSION}${SUFFIX}} 25 | REPO=${REPO:-rancher} 26 | 27 | if echo $TAG | grep -q dirty; then 28 | TAG=dev 29 | HELM_TAG=dev 30 | HELM_VERSION=0.0.0-dev 31 | fi 32 | -------------------------------------------------------------------------------- /test/e2e/Dockerfile.e2e: -------------------------------------------------------------------------------- 1 | FROM registry.suse.com/bci/golang:1.23 AS build 2 | RUN zypper -n install -l openssl-devel 3 | WORKDIR /src 4 | COPY go.mod go.sum 
/src/ 5 | RUN go mod download 6 | COPY main.go /src/ 7 | COPY controller /src/controller 8 | COPY pkg /src/pkg 9 | FROM build AS build-operator 10 | ARG TAG=v0.0.0 11 | ARG COMMIT="" 12 | ARG COMMITDATE="" 13 | ENV CGO_ENABLED=0 14 | RUN go build \ 15 | -ldflags "-w -s \ 16 | -X github.com/rancher/gke-operator/pkg/version.Version=$TAG \ 17 | -X github.com/rancher/gke-operator/pkg/version.Commit=$COMMIT \ 18 | -X github.com/rancher/gke-operator/pkg/version.CommitDate=$COMMITDATE" \ 19 | -o /usr/sbin/gke-operator . 20 | 21 | FROM scratch AS gke-operator 22 | COPY --from=build /var/lib/ca-certificates/ca-bundle.pem /etc/ssl/certs/ca-certificates.crt 23 | COPY --from=build-operator /usr/sbin/gke-operator /usr/sbin/gke-operator 24 | ENTRYPOINT ["/usr/sbin/gke-operator"] 25 | -------------------------------------------------------------------------------- /test/e2e/basic_cluster_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package e2e 18 | 19 | import ( 20 | "fmt" 21 | 22 | . "github.com/onsi/ginkgo/v2" 23 | . 
"github.com/onsi/gomega" 24 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 25 | managementv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" 26 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 27 | 28 | runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" 29 | ) 30 | 31 | var _ = Describe("BasicCluster", func() { 32 | var ( 33 | gkeConfig *gkev1.GKEClusterConfig 34 | cluster *managementv3.Cluster 35 | ) 36 | 37 | BeforeEach(func() { 38 | var ok bool 39 | gkeConfig, ok = clusterTemplates[basicClusterTemplateName] 40 | Expect(ok).To(BeTrue()) 41 | Expect(gkeConfig).NotTo(BeNil()) 42 | 43 | cluster = &managementv3.Cluster{ 44 | ObjectMeta: metav1.ObjectMeta{ 45 | Name: gkeConfig.Name, 46 | }, 47 | Spec: managementv3.ClusterSpec{ 48 | GKEConfig: &gkeConfig.Spec, 49 | }, 50 | } 51 | 52 | }) 53 | 54 | It("Succesfully creates a cluster", func() { 55 | By("Creating a cluster") 56 | Expect(cl.Create(ctx, cluster)).Should(Succeed()) 57 | 58 | By("Waiting for cluster to be ready") 59 | Eventually(func() error { 60 | currentCluster := &gkev1.GKEClusterConfig{} 61 | 62 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 63 | Name: cluster.Name, 64 | Namespace: gkeClusterConfigNamespace, 65 | }, currentCluster); err != nil { 66 | return err 67 | } 68 | 69 | if currentCluster.Status.Phase == "active" { 70 | return nil 71 | } 72 | 73 | return fmt.Errorf("cluster is not ready yet. 
Current phase: %s", currentCluster.Status.Phase) 74 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 75 | }) 76 | 77 | It("Successfully adds and removes a node pool", func() { 78 | initialNodePools := gkeConfig.DeepCopy().Spec.NodePools // save to restore later and test deletion 79 | 80 | Expect(cl.Get(ctx, runtimeclient.ObjectKey{Name: cluster.Name}, cluster)).Should(Succeed()) 81 | patch := runtimeclient.MergeFrom(cluster.DeepCopy()) 82 | 83 | nodePoolName := "gke-e2e-additional-node-pool" 84 | initialNodeCount := int64(1) 85 | maxPodsConstraint := int64(110) 86 | nodePool := gkev1.GKENodePoolConfig{ 87 | Name: &nodePoolName, 88 | InitialNodeCount: &initialNodeCount, 89 | Version: gkeConfig.Spec.KubernetesVersion, 90 | MaxPodsConstraint: &maxPodsConstraint, 91 | Config: &gkev1.GKENodeConfig{}, 92 | Autoscaling: &gkev1.GKENodePoolAutoscaling{ 93 | Enabled: true, 94 | MinNodeCount: 1, 95 | MaxNodeCount: 2, 96 | }, 97 | Management: &gkev1.GKENodePoolManagement{ 98 | AutoRepair: true, 99 | AutoUpgrade: true, 100 | }, 101 | } 102 | 103 | cluster.Spec.GKEConfig.NodePools = append(cluster.Spec.GKEConfig.NodePools, nodePool) 104 | 105 | Expect(cl.Patch(ctx, cluster, patch)).Should(Succeed()) 106 | 107 | By("Waiting for cluster to start adding node pool") 108 | Eventually(func() error { 109 | currentCluster := &gkev1.GKEClusterConfig{} 110 | 111 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 112 | Name: cluster.Name, 113 | Namespace: gkeClusterConfigNamespace, 114 | }, currentCluster); err != nil { 115 | return err 116 | } 117 | 118 | if currentCluster.Status.Phase == "updating" && len(currentCluster.Spec.NodePools) == 2 { 119 | return nil 120 | } 121 | 122 | return fmt.Errorf("cluster didn't get new node pool. 
Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 123 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 124 | 125 | By("Waiting for cluster to finish adding node pool") 126 | Eventually(func() error { 127 | currentCluster := &gkev1.GKEClusterConfig{} 128 | 129 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 130 | Name: cluster.Name, 131 | Namespace: gkeClusterConfigNamespace, 132 | }, currentCluster); err != nil { 133 | return err 134 | } 135 | 136 | if currentCluster.Status.Phase == "active" && len(currentCluster.Spec.NodePools) == 2 { 137 | return nil 138 | } 139 | 140 | return fmt.Errorf("cluster didn't finish adding node pool. Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 141 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 142 | 143 | By("Restoring initial node pools") 144 | 145 | Expect(cl.Get(ctx, runtimeclient.ObjectKey{Name: cluster.Name}, cluster)).Should(Succeed()) 146 | patch = runtimeclient.MergeFrom(cluster.DeepCopy()) 147 | 148 | cluster.Spec.GKEConfig.NodePools = initialNodePools 149 | 150 | Expect(cl.Patch(ctx, cluster, patch)).Should(Succeed()) 151 | 152 | By("Waiting for cluster to start removing node pool") 153 | Eventually(func() error { 154 | currentCluster := &gkev1.GKEClusterConfig{} 155 | 156 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 157 | Name: cluster.Name, 158 | Namespace: gkeClusterConfigNamespace, 159 | }, currentCluster); err != nil { 160 | return err 161 | } 162 | 163 | if currentCluster.Status.Phase == "updating" && len(currentCluster.Spec.NodePools) == 1 { 164 | return nil 165 | } 166 | 167 | return fmt.Errorf("cluster didn't start removing node pool. 
Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 168 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 169 | 170 | By("Waiting for cluster to finish removing node pool") 171 | Eventually(func() error { 172 | currentCluster := &gkev1.GKEClusterConfig{} 173 | 174 | if err := cl.Get(ctx, runtimeclient.ObjectKey{ 175 | Name: cluster.Name, 176 | Namespace: gkeClusterConfigNamespace, 177 | }, currentCluster); err != nil { 178 | return err 179 | } 180 | 181 | if currentCluster.Status.Phase == "active" && len(currentCluster.Spec.NodePools) == 1 { 182 | return nil 183 | } 184 | 185 | return fmt.Errorf("cluster didn't finish removing node pool. Current phase: %s, node pool count %d", currentCluster.Status.Phase, len(currentCluster.Spec.NodePools)) 186 | }, waitLong, pollInterval).ShouldNot(HaveOccurred()) 187 | 188 | By("Done waiting for cluster to finish removing node pool") 189 | }) 190 | 191 | }) 192 | -------------------------------------------------------------------------------- /test/e2e/config/config.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package config 18 | 19 | import ( 20 | "errors" 21 | "fmt" 22 | "os" 23 | 24 | "github.com/drone/envsubst/v2" 25 | "sigs.k8s.io/yaml" 26 | ) 27 | 28 | type E2EConfig struct { 29 | OperatorChart string `yaml:"operatorChart"` 30 | CRDChart string `yaml:"crdChart"` 31 | ExternalIP string `yaml:"externalIP"` 32 | MagicDNS string `yaml:"magicDNS"` 33 | BridgeIP string `yaml:"bridgeIP"` 34 | ArtifactsDir string `yaml:"artifactsDir"` 35 | 36 | CertManagerVersion string `yaml:"certManagerVersion"` 37 | CertManagerChartURL string `yaml:"certManagerChartURL"` 38 | 39 | RancherVersion string `yaml:"rancherVersion"` 40 | RancherChartURL string `yaml:"rancherChartURL"` 41 | 42 | GkeCredentials string `yaml:"gkeCredentials"` 43 | GkeProjectID string `yaml:"gkeProjectID"` 44 | } 45 | 46 | // ReadE2EConfig reads the config from yaml and substitutes variables using envsubst. 47 | // All variables can be overridden by environmental variables. 48 | func ReadE2EConfig(configPath string) (*E2EConfig, error) { //nolint:gocyclo 49 | config := &E2EConfig{} 50 | 51 | configData, err := os.ReadFile(configPath) 52 | if err != nil { 53 | return nil, fmt.Errorf("failed to read config file: %w", err) 54 | } 55 | 56 | if configData == nil { 57 | return nil, errors.New("config file can't be empty") 58 | } 59 | 60 | if err := yaml.Unmarshal(configData, config); err != nil { 61 | return nil, fmt.Errorf("failed to unmarshal config file: %w", err) 62 | } 63 | 64 | if operatorChart := os.Getenv("OPERATOR_CHART"); operatorChart != "" { 65 | config.OperatorChart = operatorChart 66 | } 67 | 68 | if config.OperatorChart == "" { 69 | return nil, errors.New("no OPERATOR_CHART provided, an operator helm chart is required to run e2e tests") 70 | } 71 | 72 | if crdChart := os.Getenv("CRD_CHART"); crdChart != "" { 73 | config.CRDChart = crdChart 74 | } 75 | 76 | if config.CRDChart == "" { 77 | return nil, errors.New("no CRD_CHART provided, a crd helm chart is required to run e2e tests") 78 | } 79 | 
80 | if externalIP := os.Getenv("EXTERNAL_IP"); externalIP != "" { 81 | config.ExternalIP = externalIP 82 | } 83 | 84 | if config.ExternalIP == "" { 85 | return nil, errors.New("no EXTERNAL_IP provided, a known (reachable) node external ip it is required to run e2e tests") 86 | } 87 | 88 | if magicDNS := os.Getenv("MAGIC_DNS"); magicDNS != "" { 89 | config.MagicDNS = magicDNS 90 | } 91 | 92 | if bridgeIP := os.Getenv("BRIDGE_IP"); bridgeIP != "" { 93 | config.BridgeIP = bridgeIP 94 | } 95 | 96 | if artifactsDir := os.Getenv("ARTIFACTS_DIR"); artifactsDir != "" { 97 | config.ArtifactsDir = artifactsDir 98 | } 99 | 100 | if gkeCredentials := os.Getenv("GKE_CREDENTIALS"); gkeCredentials != "" { 101 | config.GkeCredentials = gkeCredentials 102 | } 103 | 104 | if gkeProjectID := os.Getenv("GKE_PROJECT_ID"); gkeProjectID != "" { 105 | config.GkeProjectID = gkeProjectID 106 | } 107 | 108 | if certManagerVersion := os.Getenv("CERT_MANAGER_VERSION"); certManagerVersion != "" { 109 | config.CertManagerVersion = certManagerVersion 110 | } 111 | 112 | if certManagerURL := os.Getenv("CERT_MANAGER_CHART_URL"); certManagerURL != "" { 113 | config.CertManagerChartURL = certManagerURL 114 | } 115 | 116 | if rancherVersion := os.Getenv("RANCHER_VERSION"); rancherVersion != "" { 117 | config.RancherVersion = rancherVersion 118 | } 119 | 120 | if rancherURL := os.Getenv("RANCHER_CHART_URL"); rancherURL != "" { 121 | config.RancherChartURL = rancherURL 122 | } 123 | 124 | if err := substituteVersions(config); err != nil { 125 | return nil, err 126 | } 127 | 128 | return config, validateGKECredentials(config) 129 | } 130 | 131 | func substituteVersions(config *E2EConfig) error { 132 | certManagerURL, err := envsubst.Eval(config.CertManagerChartURL, func(_ string) string { 133 | return config.CertManagerVersion 134 | }) 135 | if err != nil { 136 | return fmt.Errorf("failed to substitute cert manager chart url: %w", err) 137 | } 138 | config.CertManagerChartURL = certManagerURL 139 | 140 
| rancherURL, err := envsubst.Eval(config.RancherChartURL, func(_ string) string { 141 | return config.RancherVersion 142 | }) 143 | if err != nil { 144 | return fmt.Errorf("failed to substitute rancher chart url: %w", err) 145 | } 146 | config.RancherChartURL = rancherURL 147 | 148 | return nil 149 | } 150 | 151 | func validateGKECredentials(config *E2EConfig) error { 152 | if config.GkeCredentials == "" { 153 | return errors.New("no GkeCredentials provided, GKE credentials is required to run e2e tests") 154 | } 155 | 156 | return nil 157 | } 158 | -------------------------------------------------------------------------------- /test/e2e/config/config.yaml: -------------------------------------------------------------------------------- 1 | # E2E Tests config 2 | 3 | magicDNS: sslip.io 4 | bridgeIP: 172.17.0.1 5 | operatorReplicas: 1 6 | artifactsDir: ../../_artifacts 7 | 8 | certManagerVersion: v1.11.1 9 | certManagerChartURL: https://charts.jetstack.io/charts/cert-manager-${CERT_MANAGER_VERSION}.tgz 10 | 11 | rancherVersion: v2.9-head 12 | rancherChartURL: https://releases.rancher.com/server-charts/latest/ 13 | -------------------------------------------------------------------------------- /test/e2e/deploy_operator_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package e2e 18 | 19 | import ( 20 | . 
"github.com/onsi/ginkgo/v2" 21 | ) 22 | 23 | var _ = Describe("Do nothing, used to deploy rancher and operator", Label("do-nothing"), func() { 24 | It("Does nothing", func() {}) 25 | }) 26 | -------------------------------------------------------------------------------- /test/e2e/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright © 2024 SUSE LLC 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package e2e 18 | 19 | import ( 20 | "bytes" 21 | "context" 22 | "embed" 23 | "fmt" 24 | "os" 25 | "path" 26 | "path/filepath" 27 | "strings" 28 | "testing" 29 | "time" 30 | 31 | "k8s.io/apiserver/pkg/storage/names" 32 | 33 | . "github.com/onsi/ginkgo/v2" 34 | . 
"github.com/onsi/gomega" 35 | kubectl "github.com/rancher-sandbox/ele-testhelpers/kubectl" 36 | gkev1 "github.com/rancher/gke-operator/pkg/apis/gke.cattle.io/v1" 37 | e2eConfig "github.com/rancher/gke-operator/test/e2e/config" 38 | managementv3 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3" 39 | appsv1 "k8s.io/api/apps/v1" 40 | corev1 "k8s.io/api/core/v1" 41 | apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 42 | apierrors "k8s.io/apimachinery/pkg/api/errors" 43 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 44 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 45 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 46 | runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" 47 | runtimeconfig "sigs.k8s.io/controller-runtime/pkg/client/config" 48 | "sigs.k8s.io/yaml" 49 | ) 50 | 51 | func init() { 52 | utilruntime.Must(clientgoscheme.AddToScheme(clientgoscheme.Scheme)) 53 | utilruntime.Must(managementv3.AddToScheme(clientgoscheme.Scheme)) 54 | utilruntime.Must(apiextensionsv1.AddToScheme(clientgoscheme.Scheme)) 55 | utilruntime.Must(gkev1.AddToScheme(clientgoscheme.Scheme)) 56 | } 57 | 58 | const ( 59 | operatorDeploymentName = "gke-config-operator" 60 | operatorReleaseName = "rancher-gke-operator" 61 | operatorCrdReleaseName = "rancher-gke-operator-crd" 62 | certManagerNamespace = "cert-manager" 63 | certManagerName = "cert-manager" 64 | certManagerCAInjectorName = "cert-manager-cainjector" 65 | gkeCredentialsSecretName = "gke-credentials" 66 | cattleSystemNamespace = "cattle-system" 67 | rancherName = "rancher" 68 | gkeClusterConfigNamespace = "cattle-global-data" 69 | ) 70 | 71 | // Test configuration 72 | var ( 73 | e2eCfg *e2eConfig.E2EConfig 74 | cl runtimeclient.Client 75 | ctx = context.Background() 76 | crdNames = []string{ 77 | "gkeclusterconfigs.gke.cattle.io", 78 | } 79 | 80 | pollInterval = 10 * time.Second 81 | waitLong = 15 * time.Minute 82 | ) 83 | 84 | // Cluster Templates 85 | var ( 86 | //go:embed 
templates/* 87 | templates embed.FS 88 | 89 | clusterTemplates = map[string]*gkev1.GKEClusterConfig{} 90 | basicClusterTemplateName = "basic-cluster" 91 | ) 92 | 93 | func TestE2e(t *testing.T) { 94 | RegisterFailHandler(Fail) 95 | RunSpecs(t, "gke-operator e2e test Suite") 96 | } 97 | 98 | var _ = BeforeSuite(func() { 99 | configPath := os.Getenv("CONFIG_PATH") 100 | if configPath == "" { 101 | Fail("config path can't be empty") 102 | } 103 | 104 | var err error 105 | e2eCfg, err = e2eConfig.ReadE2EConfig(configPath) 106 | Expect(err).ToNot(HaveOccurred()) 107 | 108 | cfg, err := runtimeconfig.GetConfig() 109 | Expect(err).ToNot(HaveOccurred()) 110 | 111 | cl, err = runtimeclient.New(cfg, runtimeclient.Options{}) 112 | Expect(err).ToNot(HaveOccurred()) 113 | 114 | By("Deploying rancher and cert-manager", func() { 115 | By("Installing cert-manager", func() { 116 | if isDeploymentReady(certManagerNamespace, certManagerName) { 117 | By("already installed") 118 | } else { 119 | Expect(kubectl.RunHelmBinaryWithCustomErr( 120 | "-n", 121 | certManagerNamespace, 122 | "install", 123 | "--set", 124 | "installCRDs=true", 125 | "--create-namespace", 126 | certManagerNamespace, 127 | e2eCfg.CertManagerChartURL, 128 | )).To(Succeed()) 129 | Eventually(func() bool { 130 | return isDeploymentReady(certManagerNamespace, certManagerName) 131 | }, 5*time.Minute, 2*time.Second).Should(BeTrue()) 132 | Eventually(func() bool { 133 | return isDeploymentReady(certManagerNamespace, certManagerCAInjectorName) 134 | }, 5*time.Minute, 2*time.Second).Should(BeTrue()) 135 | } 136 | }) 137 | 138 | By("Add rancher helm chart repository", func() { 139 | Expect(kubectl.RunHelmBinaryWithCustomErr( 140 | "repo", 141 | "add", 142 | "--force-update", 143 | "rancher-latest", 144 | e2eCfg.RancherChartURL, 145 | )).To(Succeed()) 146 | }) 147 | 148 | By("Update helm repositories", func() { 149 | Expect(kubectl.RunHelmBinaryWithCustomErr( 150 | "repo", 151 | "update", 152 | )).To(Succeed())
153 | }) 154 | 155 | By("Installing rancher", func() { 156 | if isDeploymentReady(cattleSystemNamespace, rancherName) { 157 | By("already installed") 158 | } else { 159 | Expect(kubectl.RunHelmBinaryWithCustomErr( 160 | "install", 161 | "-n", 162 | cattleSystemNamespace, 163 | "--set", 164 | "bootstrapPassword=admin", 165 | "--set", 166 | "replicas=1", 167 | "--set", 168 | "extraEnv[0].name=CATTLE_SKIP_HOSTED_CLUSTER_CHART_INSTALLATION", 169 | "--set-string", 170 | "extraEnv[0].value=true", 171 | "--set", fmt.Sprintf("hostname=%s.%s", e2eCfg.ExternalIP, e2eCfg.MagicDNS), 172 | "--create-namespace", 173 | "--devel", 174 | "--set", fmt.Sprintf("rancherImageTag=%s", e2eCfg.RancherVersion), 175 | rancherName, 176 | "rancher-latest/rancher", 177 | )).To(Succeed()) 178 | Eventually(func() bool { 179 | return isDeploymentReady(cattleSystemNamespace, rancherName) 180 | }, 7*time.Minute, 2*time.Second).Should(BeTrue()) 181 | } 182 | }) 183 | }) 184 | 185 | By("Deploying gke operator CRD chart", func() { 186 | if isDeploymentReady(cattleSystemNamespace, operatorCrdReleaseName) { 187 | By("already installed") 188 | } else { 189 | Expect(kubectl.RunHelmBinaryWithCustomErr( 190 | "-n", 191 | cattleSystemNamespace, 192 | "install", 193 | "--create-namespace", 194 | "--set", "debug=true", 195 | operatorCrdReleaseName, 196 | e2eCfg.CRDChart, 197 | )).To(Succeed()) 198 | 199 | By("Waiting for CRDs to be created") 200 | Eventually(func() bool { 201 | for _, crdName := range crdNames { 202 | crd := &apiextensionsv1.CustomResourceDefinition{} 203 | if err := cl.Get(ctx, 204 | runtimeclient.ObjectKey{ 205 | Name: crdName, 206 | }, 207 | crd, 208 | ); err != nil { 209 | return false 210 | } 211 | } 212 | return true 213 | }, 5*time.Minute, 2*time.Second).Should(BeTrue()) 214 | } 215 | }) 216 | 217 | By("Deploying gke operator chart", func() { 218 | if isDeploymentReady(cattleSystemNamespace, operatorReleaseName) { 219 | By("already installed") 220 | } else { 221 | 
Expect(kubectl.RunHelmBinaryWithCustomErr( 222 | "-n", 223 | cattleSystemNamespace, 224 | "install", 225 | "--create-namespace", 226 | "--set", "debug=true", 227 | operatorReleaseName, 228 | e2eCfg.OperatorChart, 229 | )).To(Succeed()) 230 | 231 | By("Waiting for gke operator deployment to be available") 232 | Eventually(func() bool { 233 | return isDeploymentReady(cattleSystemNamespace, operatorDeploymentName) 234 | }, 5*time.Minute, 2*time.Second).Should(BeTrue()) 235 | } 236 | // As we are not bootstrapping rancher in the tests (going to the first login page, setting new password and rancher-url) 237 | // We need to manually set this value, which is the same value you would get from doing the bootstrap 238 | setting := &managementv3.Setting{} 239 | Expect(cl.Get(ctx, 240 | runtimeclient.ObjectKey{ 241 | Name: "server-url", 242 | }, 243 | setting, 244 | )).To(Succeed()) 245 | 246 | setting.Source = "env" 247 | setting.Value = fmt.Sprintf("https://%s.%s", e2eCfg.ExternalIP, e2eCfg.MagicDNS) 248 | 249 | Expect(cl.Update(ctx, setting)).To(Succeed()) 250 | 251 | }) 252 | 253 | By("Creating gke credentials secret", func() { 254 | secret := &corev1.Secret{ 255 | ObjectMeta: metav1.ObjectMeta{ 256 | Name: gkeCredentialsSecretName, 257 | Namespace: "default", 258 | }, 259 | Data: map[string][]byte{ 260 | "googlecredentialConfig-authEncodedJson": []byte(e2eCfg.GkeCredentials), 261 | }, 262 | } 263 | 264 | err := cl.Create(ctx, secret) 265 | if err != nil { 266 | fmt.Println(err) 267 | Expect(apierrors.IsAlreadyExists(err)).To(BeTrue()) 268 | } 269 | }) 270 | 271 | By("Reading cluster templates", func() { 272 | assets, err := templates.ReadDir("templates") 273 | Expect(err).ToNot(HaveOccurred()) 274 | 275 | for _, asset := range assets { 276 | b, err := templates.ReadFile(path.Join("templates", asset.Name())) 277 | Expect(err).ToNot(HaveOccurred()) 278 | 279 | // Replace the placeholder in the file content with the actual value 280 | content := strings.Replace(string(b), 
"${GKE_PROJECT_ID}", e2eCfg.GkeProjectID, -1) 281 | gkeCluster := &gkev1.GKEClusterConfig{} 282 | Expect(yaml.Unmarshal([]byte(content), gkeCluster)).To(Succeed()) 283 | 284 | name := strings.TrimSuffix(asset.Name(), ".yaml") 285 | generatedName := names.SimpleNameGenerator.GenerateName(name + "-") 286 | gkeCluster.Name = generatedName 287 | gkeCluster.Spec.ClusterName = generatedName 288 | 289 | clusterTemplates[name] = gkeCluster 290 | } 291 | }) 292 | }) 293 | 294 | var _ = AfterSuite(func() { 295 | By("Creating artifact directory") 296 | 297 | if _, err := os.Stat(e2eCfg.ArtifactsDir); os.IsNotExist(err) { 298 | Expect(os.Mkdir(e2eCfg.ArtifactsDir, os.ModePerm)).To(Succeed()) 299 | } 300 | 301 | By("Getting gke operator logs") 302 | 303 | podList := &corev1.PodList{} 304 | Expect(cl.List(ctx, podList, runtimeclient.MatchingLabels{ 305 | "ke.cattle.io/operator": "gke", 306 | }, runtimeclient.InNamespace(cattleSystemNamespace), 307 | )).To(Succeed()) 308 | 309 | for _, pod := range podList.Items { 310 | for _, container := range pod.Spec.Containers { 311 | output, err := kubectl.Run("logs", pod.Name, "-c", container.Name, "-n", pod.Namespace) 312 | Expect(err).ToNot(HaveOccurred()) 313 | Expect(os.WriteFile(filepath.Join(e2eCfg.ArtifactsDir, pod.Name+"-"+container.Name+".log"), redactSensitiveData([]byte(output)), 0644)).To(Succeed()) 314 | } 315 | } 316 | 317 | By("Getting GKE Clusters") 318 | 319 | gkeClusterList := &gkev1.GKEClusterConfigList{} 320 | Expect(cl.List(ctx, gkeClusterList, &runtimeclient.ListOptions{})).To(Succeed()) 321 | 322 | for _, gkeCluster := range gkeClusterList.Items { 323 | output, err := yaml.Marshal(gkeCluster) 324 | Expect(err).ToNot(HaveOccurred()) 325 | Expect(os.WriteFile(filepath.Join(e2eCfg.ArtifactsDir, "gke-cluster-config-"+gkeCluster.Name+".yaml"), redactSensitiveData([]byte(output)), 0644)).To(Succeed()) 326 | } 327 | 328 | By("Getting Rancher Clusters") 329 | 330 | rancherClusterList := &managementv3.ClusterList{} 331 | 
Expect(cl.List(ctx, rancherClusterList, &runtimeclient.ListOptions{})).To(Succeed()) 332 | 333 | for _, rancherCluster := range rancherClusterList.Items { 334 | output, err := yaml.Marshal(rancherCluster) 335 | Expect(err).ToNot(HaveOccurred()) 336 | Expect(os.WriteFile(filepath.Join(e2eCfg.ArtifactsDir, "rancher-cluster-"+rancherCluster.Name+".yaml"), redactSensitiveData([]byte(output)), 0644)).To(Succeed()) 337 | } 338 | 339 | By("Cleaning up Rancher Clusters") 340 | 341 | for _, rancherCluster := range rancherClusterList.Items { 342 | Expect(cl.Delete(ctx, &rancherCluster)).To(Succeed()) 343 | Eventually(func() error { 344 | return cl.Get(ctx, runtimeclient.ObjectKey{ 345 | Name: rancherCluster.Name, 346 | Namespace: rancherCluster.Namespace, 347 | }, &gkev1.GKEClusterConfig{}) 348 | }, waitLong, pollInterval).ShouldNot(Succeed()) 349 | } 350 | }) 351 | 352 | func isDeploymentReady(namespace, name string) bool { 353 | deployment := &appsv1.Deployment{} 354 | if err := cl.Get(ctx, 355 | runtimeclient.ObjectKey{ 356 | Namespace: namespace, 357 | Name: name, 358 | }, 359 | deployment, 360 | ); err != nil { 361 | return false 362 | } 363 | 364 | if deployment.Status.AvailableReplicas == *deployment.Spec.Replicas { 365 | return true 366 | } 367 | 368 | return false 369 | } 370 | 371 | func redactSensitiveData(input []byte) []byte { 372 | output := bytes.Replace(input, []byte(e2eCfg.GkeCredentials), []byte("***"), -1) 373 | return output 374 | } 375 | -------------------------------------------------------------------------------- /test/e2e/templates/basic-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: gke.cattle.io/v1 2 | kind: GKEClusterConfig 3 | metadata: 4 | name: basic-cluster 5 | spec: 6 | clusterName: "basic-cluster" 7 | description: "gke e2e basic cluster" 8 | labels: {} 9 | region: "us-west1" 10 | projectID: "${GKE_PROJECT_ID}" 11 | kubernetesVersion: "1.28.15-gke.1480000" 12 | loggingService: 
"" 13 | monitoringService: "" 14 | enableKubernetesAlpha: false 15 | clusterIpv4Cidr: "10.42.0.0/16" 16 | ipAllocationPolicy: 17 | useIpAliases: true 18 | nodePools: 19 | - name: gke-e2e-basic-cluster-node-pool 20 | autoscaling: 21 | enabled: false 22 | config: 23 | labels: {} 24 | initialNodeCount: 1 25 | maxPodsConstraint: 110 26 | version: "1.28.15-gke.1480000" 27 | management: 28 | autoRepair: true 29 | autoUpgrade: true 30 | clusterAddons: 31 | httpLoadBalancing: true 32 | networkPolicyConfig: false 33 | horizontalPodAutoscaling: true 34 | networkPolicyEnabled: false 35 | network: default 36 | subnetwork: default 37 | privateClusterConfig: 38 | enablePrivateEndpoint: false 39 | enablePrivateNodes: false 40 | masterAuthorizedNetworks: 41 | enabled: false 42 | locations: ["us-west1-c"] 43 | maintenanceWindow: "" 44 | googleCredentialSecret: default:gke-credentials 45 | --------------------------------------------------------------------------------