├── .copyrightignore ├── .github └── workflows │ ├── dco.yml │ ├── go-postsubmit.yaml │ ├── go-presubmit.yaml │ └── go-release.yaml ├── .gitignore ├── CHANGELOG ├── CHANGELOG-v0.1.md └── CHANGELOG-v0.2.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── DCO ├── Dockerfile ├── LICENSE ├── Makefile ├── OWNERS ├── README.md ├── SECURITY.md ├── arch.png ├── charts └── multicluster-controlplane │ ├── .helmignore │ ├── Chart.yaml │ ├── templates │ ├── _validate.tpl │ ├── clusterrole.yaml │ ├── clusterrolebinding-admin.yaml │ ├── clusterrolebinding.yaml │ ├── deployment.yaml │ ├── pvc.yaml │ ├── role.yaml │ ├── rolebinding.yaml │ ├── route.yaml │ ├── secret.yaml │ ├── service.yaml │ └── serviceaccount.yaml │ └── values.yaml ├── cmd └── server │ └── main.go ├── go.mod ├── go.sum ├── hack ├── check │ ├── check-copyright.sh │ └── copyright-header.txt ├── crd-update │ ├── copy-crds.sh │ └── init.sh ├── demo │ ├── README.md │ ├── demo-magic.sh │ └── next-generation.sh ├── deploy-etcd.sh ├── deploy-multicluster-controlplane.sh ├── deploy │ ├── addon │ │ ├── managed-serviceaccount │ │ │ ├── README.md │ │ │ ├── hub │ │ │ │ ├── clustermanagementaddon │ │ │ │ │ ├── clustermanagementaddon.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── crds │ │ │ │ │ ├── authentication.open-cluster-management.io_managedserviceaccounts.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ └── kustomization.yaml │ │ │ └── manager │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── manager-deployment.yaml │ │ │ │ ├── role.yaml │ │ │ │ └── rolebinding.yaml │ │ ├── policy │ │ │ ├── README.md │ │ │ ├── hub │ │ │ │ ├── clustermanagementaddon │ │ │ │ │ ├── config-policy-controller.yaml │ │ │ │ │ ├── governance-policy-framework.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── crds │ │ │ │ │ ├── apps.open-cluster-management.io_placementrules_crd.yaml │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ ├── policy.open-cluster-management.io_placementbindings.yaml │ │ │ │ │ ├── policy.open-cluster-management.io_policies.yaml │ │ │ │ │ 
├── policy.open-cluster-management.io_policyautomations.yaml │ │ │ │ │ └── policy.open-cluster-management.io_policysets.yaml │ │ │ │ └── kustomization.yaml │ │ │ └── manager │ │ │ │ ├── addon-deployment.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── propagator-deployment.yaml │ │ │ │ ├── propagator-metrics-service.yaml │ │ │ │ ├── role.yaml │ │ │ │ ├── rolebinding.yaml │ │ │ │ └── serviceaccount.yaml │ │ └── work-manager │ │ │ ├── README.md │ │ │ ├── hub │ │ │ ├── agent-clusterrole.yaml │ │ │ ├── clustermanagementaddon │ │ │ │ ├── clustermanagementaddon.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── crds │ │ │ │ ├── action.open-cluster-management.io_managedclusteractions.crd.yaml │ │ │ │ ├── hive.openshift.io_clusterclaims.yaml │ │ │ │ ├── hive.openshift.io_clusterdeployments.yaml │ │ │ │ ├── hive.openshift.io_clusterpools.yaml │ │ │ │ ├── hive.openshift.io_syncsets.yaml │ │ │ │ ├── hiveinternal.openshift.io_clustersyncs.yaml │ │ │ │ ├── imageregistry.open-cluster-management.io_managedclusterimageregistries.crd.yaml │ │ │ │ ├── internal.open-cluster-management.io_managedclusterinfos.crd.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── view.open-cluster-management.io_managedclusterviews.crd.yaml │ │ │ └── kustomization.yaml │ │ │ └── manager │ │ │ ├── controller.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── role.yaml │ │ │ └── rolebinding.yaml │ ├── agent │ │ ├── clusterrole.yaml │ │ ├── clusterrolebinding-admin.yaml │ │ ├── clusterrolebinding.yaml │ │ ├── deployment.yaml │ │ ├── kustomization.yaml │ │ ├── role.yaml │ │ ├── rolebinding.yaml │ │ └── serviceaccount.yaml │ └── etcd │ │ ├── kustomization.yaml │ │ ├── statefulset.yaml │ │ └── statefulsetservice.yaml ├── install-etcd.sh ├── lib │ ├── .gitattributes │ ├── deps.sh │ ├── etcd.sh │ ├── init.sh │ ├── logging.sh │ ├── util.sh │ └── version.sh └── start-multicluster-controlplane.sh ├── pkg ├── agent │ ├── addons │ │ └── managedserviceaccount.go │ ├── agent.go │ └── crds │ │ ├── 
0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml │ │ └── 0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml ├── certificate │ ├── certchains │ │ ├── cainfo.go │ │ ├── certchains.go │ │ ├── certchains_test.go │ │ ├── chainsbuilder.go │ │ ├── chainsbuilder_test.go │ │ ├── doc.go │ │ ├── errors.go │ │ ├── signerbuilder.go │ │ ├── signers.go │ │ ├── signers_test.go │ │ └── util.go │ ├── certificate.go │ └── certificateinfo.go ├── cmd │ ├── agent │ │ └── agent.go │ └── controller │ │ └── controller.go ├── controllers │ ├── addons │ │ └── managedserviceaccount.go │ ├── bootstrap │ │ ├── crds.go │ │ ├── crds │ │ │ ├── 0000_00_addon.open-cluster-management.io_clustermanagementaddons.crd.yaml │ │ │ ├── 0000_00_clusters.open-cluster-management.io_managedclusters.crd.yaml │ │ │ ├── 0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml │ │ │ ├── 0000_00_multicluster.x-k8s.io_clusterprofiles.crd.yaml │ │ │ ├── 0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml │ │ │ ├── 0000_00_work.open-cluster-management.io_manifestworks.crd.yaml │ │ │ ├── 0000_01_addon.open-cluster-management.io_managedclusteraddons.crd.yaml │ │ │ ├── 0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml │ │ │ ├── 0000_02_addon.open-cluster-management.io_addondeploymentconfigs.crd.yaml │ │ │ ├── 0000_02_clusters.open-cluster-management.io_placements.crd.yaml │ │ │ ├── 0000_03_addon.open-cluster-management.io_addontemplates.crd.yaml │ │ │ ├── 0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml │ │ │ ├── 0000_05_clusters.open-cluster-management.io_addonplacementscores.crd.yaml │ │ │ └── 0000_06_authentication.open-cluster-management.io_managedserviceaccounts_crd.yaml │ │ └── hub.go │ ├── controller.go │ ├── kubecontroller │ │ ├── bootstrap.go │ │ ├── certificates.go │ │ ├── clusterroleaggregation.go │ │ ├── config │ │ │ └── config.go │ │ ├── controllermanager.go │ │ ├── core.go │ │ ├── 
import_known_versions.go │ │ └── options │ │ │ ├── csrsigningcontroller.go │ │ │ ├── garbagecollectorcontroller.go │ │ │ ├── namespacecontroller.go │ │ │ ├── options.go │ │ │ └── serviceaccountcontroller.go │ └── ocmcontroller │ │ ├── ocmagent.go │ │ ├── ocmcontroller.go │ │ ├── ocmcrd.go │ │ └── ocmhubresource.go ├── etcd │ └── etcd.go ├── feature │ └── feature.go ├── servers │ ├── aggregator.go │ ├── apiextensions.go │ ├── configs │ │ └── configs.go │ ├── kubeapiserver.go │ ├── options │ │ ├── admission.go │ │ ├── authentication.go │ │ ├── authorization.go │ │ ├── embeddedetcd.go │ │ ├── options.go │ │ └── plugins.go │ ├── server.go │ └── simplerestoptionsfactroy.go └── util │ ├── recorder.go │ └── util.go ├── plugin └── admission │ ├── managedclustermutating │ └── admission.go │ ├── managedclustersetbindingvalidating │ └── admission.go │ ├── managedclustervalidating │ └── admission.go │ ├── manifestworkvalidating │ └── admission.go │ └── util │ └── util.go └── test ├── bin └── util.sh ├── e2e ├── hack │ ├── cleanup.sh │ └── e2e.sh ├── loopback_test.go ├── managedserviceaccount_test.go └── suite_test.go ├── integration └── hack │ ├── cleanup.sh │ └── integration.sh └── performance ├── README.md ├── cluster ├── cleanup.go └── create.go ├── cmd ├── cleanup.go └── create.go ├── hack └── performance.sh ├── metrics └── metrics.go ├── perftool.go └── utils └── utils.go /.copyrightignore: -------------------------------------------------------------------------------- 1 | #file extensions to ignore on top of .gitignore 2 | .copyrightignore 3 | vendor 4 | *.yaml 5 | *.yml 6 | *.sh 7 | hack/lib/etcd.sh 8 | hack/lib/version.sh 9 | config/helpers/bootstrap.go 10 | pkg/controllers/kubecontroller/bootstrap.go 11 | pkg/controllers/kubecontroller/import_known_versions.go 12 | pkg/controllers/kubecontroller/options/csrsigningcontroller.go 13 | pkg/controllers/kubecontroller/options/garbagecollectorcontroller.go 14 | pkg/controllers/kubecontroller/options/namespacecontroller.go 
15 | pkg/controllers/kubecontroller/options/serviceaccountcontroller.go 16 | -------------------------------------------------------------------------------- /.github/workflows/dco.yml: -------------------------------------------------------------------------------- 1 | # Copyright Contributors to the Open Cluster Management project 2 | 3 | name: DCO 4 | on: 5 | workflow_dispatch: {} 6 | pull_request: 7 | branches: 8 | - main 9 | 10 | jobs: 11 | dco_check: 12 | runs-on: ubuntu-latest 13 | name: DCO Check 14 | steps: 15 | - name: Get PR Commits 16 | id: 'get-pr-commits' 17 | uses: tim-actions/get-pr-commits@master 18 | with: 19 | token: ${{ secrets.GITHUB_TOKEN }} 20 | - name: DCO Check 21 | uses: tim-actions/dco@master 22 | with: 23 | commits: ${{ steps.get-pr-commits.outputs.commits }} 24 | -------------------------------------------------------------------------------- /.github/workflows/go-postsubmit.yaml: -------------------------------------------------------------------------------- 1 | name: GoPostSubmit 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | workflow_dispatch: {} 8 | 9 | env: 10 | # Common versions 11 | GO_VERSION: '1.22' 12 | GO_REQUIRED_MIN_VERSION: '' 13 | GOPATH: '/home/runner/work/multicluster-controlplane/multicluster-controlplane/go' 14 | defaults: 15 | run: 16 | working-directory: go/src/open-cluster-management.io/multicluster-controlplane 17 | 18 | jobs: 19 | image: 20 | name: image 21 | runs-on: ubuntu-latest 22 | strategy: 23 | matrix: 24 | arch: [ amd64, arm64 ] 25 | steps: 26 | - name: checkout code 27 | uses: actions/checkout@v3 28 | with: 29 | fetch-depth: 1 30 | path: go/src/open-cluster-management.io/multicluster-controlplane 31 | - name: install Go 32 | uses: actions/setup-go@v3 33 | with: 34 | go-version: ${{ env.GO_VERSION }} 35 | - name: install imagebuilder 36 | run: go install github.com/openshift/imagebuilder/cmd/imagebuilder@v1.2.4-0.20230309135844-a3c3f8358ca3 37 | - name: pull base image 38 | run: docker pull 
registry.access.redhat.com/ubi8/ubi-minimal:latest --platform=linux/${{ matrix.arch }} 39 | - name: image 40 | run: | 41 | IMAGE_TAG=latest-${{ matrix.arch }} \ 42 | IMAGE_BUILD_EXTRA_FLAGS="--build-arg OS=linux --build-arg ARCH=${{ matrix.arch }}" \ 43 | make image 44 | - name: push 45 | run: | 46 | echo ${{ secrets.DOCKER_PASSWORD }} | docker login quay.io --username ${{ secrets.DOCKER_USER }} --password-stdin 47 | docker push quay.io/open-cluster-management/multicluster-controlplane:latest-${{ matrix.arch }} 48 | image-manifest: 49 | name: image manifest 50 | runs-on: ubuntu-latest 51 | needs: [ image ] 52 | steps: 53 | - name: checkout code 54 | uses: actions/checkout@v3 55 | with: 56 | fetch-depth: 1 57 | path: go/src/open-cluster-management.io/multicluster-controlplane 58 | - name: create 59 | run: | 60 | echo ${{ secrets.DOCKER_PASSWORD }} | docker login quay.io --username ${{ secrets.DOCKER_USER }} --password-stdin 61 | docker manifest create quay.io/open-cluster-management/multicluster-controlplane:latest \ 62 | quay.io/open-cluster-management/multicluster-controlplane:latest-amd64 \ 63 | quay.io/open-cluster-management/multicluster-controlplane:latest-arm64 64 | - name: annotate 65 | run: | 66 | docker manifest annotate quay.io/open-cluster-management/multicluster-controlplane:latest \ 67 | quay.io/open-cluster-management/multicluster-controlplane:latest-amd64 --arch amd64 68 | docker manifest annotate quay.io/open-cluster-management/multicluster-controlplane:latest \ 69 | quay.io/open-cluster-management/multicluster-controlplane:latest-arm64 --arch arm64 70 | - name: push 71 | run: | 72 | docker manifest push quay.io/open-cluster-management/multicluster-controlplane:latest 73 | -------------------------------------------------------------------------------- /.github/workflows/go-presubmit.yaml: -------------------------------------------------------------------------------- 1 | name: GoPreSubmit 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | 
GOPATH: '/home/runner/work/multicluster-controlplane/multicluster-controlplane/go'
echo "label=$(git rev-parse --short ${{ github.sha }})" >> "$GITHUB_OUTPUT"
27 | .ocmconfig 28 | multicluster_ca 29 | hack/deploy/cert-* 30 | hack/deploy/controlplane/cert/* 31 | hack/deploy/controlplane/cert* 32 | hack/deploy/etcd/cert* 33 | 34 | ocmconfig.yaml 35 | *.kubeconfig 36 | 37 | # Test 38 | test/resources 39 | -------------------------------------------------------------------------------- /CHANGELOG/CHANGELOG-v0.1.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project ) 2 | # Changelog 3 | All notable changes to this project will be documented in this file. 4 | 5 | ## v0.1.0 6 | 7 | ### New Features 8 | * Support to deploy multicluster-controlplane as a deployment on openshift cluster. 9 | * Support to run multicluster-controlplane as a binary with embed/external etcd. 10 | 11 | ### Added 12 | N/C 13 | 14 | ### Changes 15 | N/C 16 | 17 | ### Bug Fixes 18 | N/C 19 | 20 | ### Removed & Deprecated 21 | N/C 22 | -------------------------------------------------------------------------------- /CHANGELOG/CHANGELOG-v0.2.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project ) 2 | # Changelog since v0.1.0 3 | 4 | All notable changes to this project will be documented in this file. 5 | 6 | ## v0.2.0 7 | 8 | ### New Features 9 | * Support to expose controlplane service with load balance service. 10 | * Support to run registration agent and worker agent as a standalone agent. 11 | * Support to use helm chart to deploy controlplane. 12 | * Support to manage controlplane hosting cluster by controlplane itself 13 | * Support to delegate the user authentication to controlplane hosting cluster kube-apiserver. 14 | 15 | ### Added 16 | * Support to share one etcd with multiple controlplanes. 17 | * Support to initiate controlplane certificates automatically. 18 | 19 | ### Changes 20 | * Upgrade kube dependencies to v1.26. 
21 | * Upgrade open-cluster-management dependencies to v0.11.0. 22 | 23 | ### Bug Fixes 24 | N/C 25 | 26 | ### Removed & Deprecated 27 | N/C 28 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project )**Table of Contents** 2 | 3 | # Contributor Covenant Code of Conduct 4 | 5 | ## Our Pledge 6 | 7 | In the interest of fostering an open and welcoming environment, we as 8 | contributors and maintainers pledge to making participation in our project and 9 | our community a harassment-free experience for everyone, regardless of age, body 10 | size, disability, ethnicity, sex characteristics, gender identity and expression, 11 | level of experience, education, socio-economic status, nationality, personal 12 | appearance, race, religion, or sexual identity and orientation. 13 | 14 | ## Our Standards 15 | 16 | Examples of behavior that contributes to creating a positive environment 17 | include: 18 | 19 | * Using welcoming and inclusive language 20 | * Being respectful of differing viewpoints and experiences 21 | * Gracefully accepting constructive criticism 22 | * Focusing on what is best for the community 23 | * Showing empathy towards other community members 24 | 25 | Examples of unacceptable behavior by participants include: 26 | 27 | * The use of sexualized language or imagery and unwelcome sexual attention or 28 | advances 29 | * Trolling, insulting/derogatory comments, and personal or political attacks 30 | * Public or private harassment 31 | * Publishing others' private information, such as a physical or electronic 32 | address, without explicit permission 33 | * Other conduct which could reasonably be considered inappropriate in a 34 | professional setting 35 | 36 | ## Our Responsibilities 37 | 38 | Project maintainers are responsible for clarifying the 
standards of acceptable 39 | behavior and are expected to take appropriate and fair corrective action in 40 | response to any instances of unacceptable behavior. 41 | 42 | Project maintainers have the right and responsibility to remove, edit, or 43 | reject comments, commits, code, wiki edits, issues, and other contributions 44 | that are not aligned to this Code of Conduct, or to ban temporarily or 45 | permanently any contributor for other behaviors that they deem inappropriate, 46 | threatening, offensive, or harmful. 47 | 48 | ## Scope 49 | 50 | This Code of Conduct applies both within project spaces and in public spaces 51 | when an individual is representing the project or its community. Examples of 52 | representing a project or community include using an official project e-mail 53 | address, posting via an official social media account, or acting as an appointed 54 | representative at an online or offline event. Representation of a project may be 55 | further defined and clarified by project maintainers. 56 | 57 | ## Enforcement 58 | 59 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 60 | reported by contacting the project team at [acm-contact@redhat.com](mailto:acm-contact@redhat.com). All 61 | complaints will be reviewed and investigated and will result in a response that 62 | is deemed necessary and appropriate to the circumstances. The project team is 63 | obligated to maintain confidentiality with regard to the reporter of an incident. 64 | Further details of specific enforcement policies may be posted separately. 65 | 66 | Project maintainers who do not follow or enforce the Code of Conduct in good 67 | faith may face temporary or permanent repercussions as determined by other 68 | members of the project's leadership. 
69 | 70 | ## Attribution 71 | 72 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 73 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html. 74 | 75 | [homepage]: https://www.contributor-covenant.org 76 | 77 | For answers to common questions about this code of conduct, see 78 | https://www.contributor-covenant.org/faq. 79 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project )**Table of Contents** 2 | 3 | - [Contributing guidelines](#contributing-guidelines) 4 | - [Terms](#terms) 5 | - [Certificate of Origin](#certificate-of-origin) 6 | - [Contributing a patch](#contributing-a-patch) 7 | - [Issue and pull request management](#issue-and-pull-request-management) 8 | - [Requirements](#requirements) 9 | - [Develop new commands](#develop-new-commands) 10 | - [Resources](#resources) 11 | - [Client](#client) 12 | - [Unit tests](#unit-tests) 13 | - [E2E tests](#e2e-tests) 14 | 15 | # Contributing guidelines 16 | 17 | ## Terms 18 | 19 | All contributions to the repository must be submitted under the terms of the [Apache Public License 2.0](https://www.apache.org/licenses/LICENSE-2.0). 20 | 21 | ## Certificate of Origin 22 | 23 | By contributing to this project, you agree to the Developer Certificate of Origin (DCO). This document was created by the Linux Kernel community and is a simple statement that you, as a contributor, have the legal right to make the contribution. See the [DCO](DCO) file for details. 24 | 25 | ## Contributing a patch 26 | 27 | 1. Submit an issue describing your proposed change to the repository in question. The repository owners will respond to your issue promptly. 28 | 2. Fork the desired repository, then develop and test your code changes. 29 | 3. Submit a pull request. 
and the [exec.go](pkg/cmd/version/exec.go) which contains the code to execute the command.
The project uses `make test-e2e` to run e2e tests; this will deploy a kind cluster and run a set of tests for clusteradm commands. A prerequisite is that Docker is already running.
ENV DIRPATH=/go/src/open-cluster-management.io/multicluster-controlplane
3 | -------------------------------------------------------------------------------- /arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-cluster-management-io/multicluster-controlplane/9d91e61b4b23aff19c28816419fa1cf47a11e2c3/arch.png -------------------------------------------------------------------------------- /charts/multicluster-controlplane/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: multicluster-controlplane 3 | description: A Helm chart for multicluster-controlplane 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. 
This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.7.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "0.7.0" 25 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/_validate.tpl: -------------------------------------------------------------------------------- 1 | {{/* validate service */}} 2 | {{- define "validate.exposeService" }} 3 | {{- if .Values.route.enabled }} 4 | {{- if or .Values.loadbalancer.enabled .Values.nodeport.enabled }} 5 | {{- fail "route, loadbalancer and nodeport should not be enabled more than 1" }} 6 | {{- end }} 7 | 8 | {{- else if .Values.loadbalancer.enabled }} 9 | {{- if or .Values.route.enabled .Values.nodeport.enabled }} 10 | {{- fail "route, loadbalancer and nodeport should not be enabled more than 1" }} 11 | {{- end }} 12 | 13 | {{- else if .Values.nodeport.enabled }} 14 | {{- if or .Values.route.enabled .Values.loadbalancer.enabled }} 15 | {{- fail "route, loadbalancer and nodeport should not be enabled more than 1" }} 16 | {{- end }} 17 | {{- if not .Values.nodeport.port }} 18 | {{- fail "nodeport.port should be set while nodeport is enabled" }} 19 | {{- end }} 20 | {{- else }} 21 | {{/* service exposed as ClusterIP */}} 22 | {{- end }} 23 | {{- end }} 24 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 
1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: open-cluster-management:multicluster-controlplane:{{ .Release.Namespace }} 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["secrets", "configmaps"] 8 | verbs: ["get", "list"] 9 | - apiGroups: ["authentication.k8s.io"] 10 | resources: ["tokenreviews"] 11 | verbs: ["create"] 12 | {{ if eq .Values.enableSelfManagement true }} 13 | # Allow agent to manage crds 14 | - apiGroups: ["apiextensions.k8s.io"] 15 | resources: ["customresourcedefinitions"] 16 | verbs: ["create", "get", "list", "update", "watch", "patch", "delete"] 17 | # Allow agent to get/list/watch nodes 18 | # list nodes to calculates the capacity and allocatable resources of the managed cluster 19 | - apiGroups: [""] 20 | resources: ["nodes"] 21 | verbs: ["get", "list", "watch"] 22 | # Allow agent to list clusterclaims 23 | - apiGroups: ["cluster.open-cluster-management.io"] 24 | resources: ["clusterclaims"] 25 | verbs: ["get", "list", "watch"] 26 | # Allow agent to create/update/patch/delete namespaces, get/list/watch are contained in admin role already 27 | - apiGroups: [""] 28 | resources: ["namespaces"] 29 | verbs: ["create", "update", "patch", "delete"] 30 | # Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding 31 | - apiGroups: ["rbac.authorization.k8s.io"] 32 | resources: ["clusterrolebindings", "rolebindings"] 33 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 34 | - apiGroups: ["rbac.authorization.k8s.io"] 35 | resources: ["clusterroles", "roles"] 36 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"] 37 | # Allow OCM addons to setup metrics collection with Prometheus 38 | # TODO: Move this permission to the open-cluster-management:{{ .KlusterletName }}-work:execution Role (not ClusterRole) 39 | # when it is created. 
40 | - apiGroups: ["monitoring.coreos.com"] 41 | resources: ["servicemonitors"] 42 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 43 | # Allow agent to manage oauth clients 44 | # TODO refactor permission control of work agent to remove this 45 | - apiGroups: ["oauth.openshift.io"] 46 | resources: ["oauthclients"] 47 | verbs: ["get", "list", "watch", "create", "patch","update", "delete"] 48 | # Allow agent to manage appliedmanifestworks 49 | - apiGroups: ["work.open-cluster-management.io"] 50 | resources: ["appliedmanifestworks"] 51 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 52 | - apiGroups: ["work.open-cluster-management.io"] 53 | resources: ["appliedmanifestworks/status"] 54 | verbs: ["patch", "update"] 55 | - apiGroups: ["work.open-cluster-management.io"] 56 | resources: ["appliedmanifestworks/finalizers"] 57 | verbs: ["update"] 58 | # Allow agent to check executor permissions 59 | - apiGroups: ["authorization.k8s.io"] 60 | resources: ["subjectaccessreviews"] 61 | verbs: ["create"] 62 | - apiGroups: [""] 63 | resources: ["serviceaccounts"] 64 | verbs: ["impersonate"] 65 | - apiGroups: ["config.openshift.io"] 66 | resources: ["clusterversions"] 67 | verbs: ["get", "list"] 68 | {{ end }} 69 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/clusterrolebinding-admin.yaml: -------------------------------------------------------------------------------- 1 | {{ if eq .Values.enableSelfManagement true }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: open-cluster-management:multicluster-controlplane:self-management:work-execution-admin 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | # We deploy a controller that could work with permission lower than cluster-admin, the tradeoff is 10 | # responsivity because list/watch cannot be maintained over too many 
namespaces. 11 | name: admin 12 | subjects: 13 | - kind: ServiceAccount 14 | name: multicluster-controlplane-sa 15 | namespace: {{ .Release.Namespace }} 16 | {{ end }} 17 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: open-cluster-management:multicluster-controlplane:{{ .Release.Namespace }} 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: open-cluster-management:multicluster-controlplane:{{ .Release.Namespace }} 9 | subjects: 10 | - kind: ServiceAccount 11 | name: multicluster-controlplane-sa 12 | namespace: {{ .Release.Namespace }} 13 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: multicluster-controlplane 5 | namespace: {{ .Release.Namespace }} 6 | labels: 7 | app: multicluster-controlplane 8 | spec: 9 | replicas: {{ .Values.replicas }} 10 | selector: 11 | matchLabels: 12 | app: multicluster-controlplane 13 | template: 14 | metadata: 15 | labels: 16 | app: multicluster-controlplane 17 | spec: 18 | serviceAccountName: multicluster-controlplane-sa 19 | containers: 20 | - name: controlplane 21 | image: {{ .Values.image }} 22 | imagePullPolicy: {{ .Values.imagePullPolicy }} 23 | args: 24 | - "/multicluster-controlplane" 25 | - "server" 26 | {{- if .Values.features }} 27 | - "--feature-gates={{ .Values.features }}" 28 | {{- end }} 29 | {{- if .Values.autoApprovalBootstrapUsers }} 30 | - "--auto-approved-csr-users={{ .Values.autoApprovalBootstrapUsers }}" 31 | {{- end }} 32 | {{- if eq 
.Values.enableSelfManagement true }} 33 | - "--self-management" 34 | {{- end }} 35 | {{- if .Values.selfManagementClusterName }} 36 | - "--self-management-cluster-name={{ .Values.selfManagementClusterName }}" 37 | {{- end }} 38 | {{- if eq .Values.enableDelegatingAuthentication true }} 39 | - "--delegating-authentication" 40 | {{- end }} 41 | {{- if .Values.containerSecurityContext }} 42 | securityContext: 43 | {{- toYaml .Values.containerSecurityContext | nindent 10 }} 44 | {{- end }} 45 | env: 46 | - name: ETCD_SNAPSHOT_COUNT 47 | value: "{{ .Values.etcd.snapshotCount }}" 48 | livenessProbe: 49 | httpGet: 50 | path: /livez 51 | scheme: HTTPS 52 | port: 9443 53 | failureThreshold: 8 54 | initialDelaySeconds: 10 55 | periodSeconds: 10 56 | successThreshold: 1 57 | timeoutSeconds: 15 58 | readinessProbe: 59 | httpGet: 60 | path: /readyz 61 | scheme: HTTPS 62 | port: 9443 63 | failureThreshold: 3 64 | initialDelaySeconds: 2 65 | periodSeconds: 1 66 | successThreshold: 1 67 | timeoutSeconds: 15 68 | startupProbe: 69 | failureThreshold: 24 70 | httpGet: 71 | path: /livez 72 | port: 9443 73 | scheme: HTTPS 74 | initialDelaySeconds: 10 75 | periodSeconds: 10 76 | successThreshold: 1 77 | timeoutSeconds: 15 78 | volumeMounts: 79 | - name: controlplane-config 80 | mountPath: /controlplane_config 81 | - name: ocm-data 82 | mountPath: /.ocm 83 | volumes: 84 | - name: controlplane-config 85 | secret: 86 | secretName: controlplane-config 87 | - name: ocm-data 88 | persistentVolumeClaim: 89 | claimName: multicluster-controlplane-pvc-volume 90 | {{- if .Values.securityContext }} 91 | securityContext: 92 | {{- toYaml .Values.securityContext | nindent 8 }} 93 | {{- end }} 94 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: 
multicluster-controlplane-pvc-volume 5 | namespace: {{ .Release.Namespace }} 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: {{ .Values.pvc.storageCapacity }} 12 | {{- if .Values.pvc.storageClassName }} 13 | storageClassName: {{ .Values.pvc.storageClassName }} 14 | {{- end }} 15 | {{- if .Values.pvc.selector }} 16 | selector: {{ .Values.pvc.selector }} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/role.yaml: -------------------------------------------------------------------------------- 1 | # Copyright Contributors to the Open Cluster Management project 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: multicluster-controlplane 6 | namespace: {{ .Release.Namespace }} 7 | rules: 8 | - apiGroups: [""] 9 | resources: ["secrets"] 10 | verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] 11 | - apiGroups: [""] 12 | resources: ["services"] 13 | verbs: ["get", "list", "watch"] 14 | {{- if eq .Values.route.enabled true }} 15 | - apiGroups: ["route.openshift.io"] 16 | resources: ["routes"] 17 | verbs: ["get", "list", "watch"] 18 | {{- end }} 19 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | # Copyright Contributors to the Open Cluster Management project 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: multicluster-controlplane 6 | namespace: {{ .Release.Namespace }} 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: Role 10 | name: multicluster-controlplane 11 | subjects: 12 | - kind: ServiceAccount 13 | name: multicluster-controlplane-sa 14 | namespace: {{ .Release.Namespace }} 15 | 
-------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/route.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Values.route.enabled true }} 2 | apiVersion: route.openshift.io/v1 3 | kind: Route 4 | metadata: 5 | name: multicluster-controlplane 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | component: multicluster-controlplane 9 | spec: 10 | tls: 11 | termination: passthrough 12 | to: 13 | kind: Service 14 | name: multicluster-controlplane 15 | weight: 100 16 | wildcardPolicy: None 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | {{ $caCrt := "" }} 2 | {{ $caKey := "" }} 3 | 4 | {{- if .Values.apiserver.ca }} 5 | {{ $caCrt = .Values.apiserver.ca }} 6 | {{ $caKey = (required "apiserver.cakey should be set together with apiserver.ca" .Values.apiserver.cakey) }} 7 | {{- end }} 8 | 9 | {{- if .Values.apiserver.generateCA }} 10 | {{ $ca := genCA "ca" 3650 }} 11 | {{ $caCrt = $ca.Cert }} 12 | {{ $caKey = $ca.Key }} 13 | {{- end }} 14 | 15 | {{- $proxyCA := genCA "proxy-ca" 3650 }} 16 | {{- $proxyClient := genSignedCert "front-proxy-client" nil nil 3650 $proxyCA }} 17 | 18 | apiVersion: v1 19 | kind: Secret 20 | metadata: 21 | name: controlplane-config 22 | namespace: {{ .Release.Namespace }} 23 | type: Opaque 24 | stringData: 25 | ocmconfig.yaml: |- 26 | apiserver: 27 | externalHostname: {{ .Values.apiserver.externalHostname }} 28 | port: {{ .Values.apiserver.externalPort }} 29 | {{- if $caCrt }} 30 | caFile: "/controlplane_config/apiserver_ca.crt" 31 | caKeyFile: "/controlplane_config/apiserver_ca.key" 32 | {{- end }} 33 | etcd: 34 | mode: {{ .Values.etcd.mode }} 35 | prefix: {{ .Release.Namespace }} 36 | {{- if (eq .Values.etcd.mode "external") }} 37 
| {{- if (not .Values.etcd.servers) }} 38 | {{- fail "etcd.servers should be set together with etcd.mode" }} 39 | {{- end }} 40 | servers: 41 | {{- range .Values.etcd.servers }} 42 | - {{ . }} 43 | {{- end }} 44 | caFile: "/controlplane_config/etcd_ca.crt" 45 | certFile: "/controlplane_config/etcd_cert.crt" 46 | keyFile: "/controlplane_config/etcd_cert.key" 47 | {{- end }} 48 | aggregator: 49 | proxyClientCertFile: /controlplane_config/proxy-client.crt 50 | proxyClientKeyFile: /controlplane_config/proxy-client.key 51 | requestheaderClientCAFile: /controlplane_config/requestheader-client-ca.crt 52 | requestheaderUsernameHeaders: ["X-Remote-User"] 53 | requestheaderGroupHeaders: ["X-Remote-Group"] 54 | requestheaderExtraHeadersPrefix: ["X-Remote-Extra-"] 55 | requestheaderAllowedNames: ["front-proxy-client"] 56 | 57 | {{- if $caCrt }} 58 | apiserver_ca.crt: {{ $caCrt | quote }} 59 | apiserver_ca.key: {{ $caKey | quote }} 60 | {{- end }} 61 | 62 | requestheader-client-ca.crt: {{ $proxyCA.Cert | quote }} 63 | requestheader-client-ca.key: {{ $proxyCA.Key | quote }} 64 | proxy-client.crt: {{ $proxyClient.Cert | quote }} 65 | proxy-client.key: {{ $proxyClient.Key | quote }} 66 | 67 | {{- if (eq .Values.etcd.mode "external") }} 68 | etcd_ca.crt: {{ (required "etcd.ca should be set together with etcd.mode" .Values.etcd.ca) | quote }} 69 | etcd_cert.crt: {{ (required "etcd.cert should be set together with etcd.mode" .Values.etcd.cert) | quote }} 70 | etcd_cert.key: {{ (required "etcd.certkey should be set together with etcd.mode" .Values.etcd.certkey) | quote }} 71 | {{- end }} 72 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/service.yaml: -------------------------------------------------------------------------------- 1 | {{- include "validate.exposeService" . 
}} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: multicluster-controlplane 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | component: multicluster-controlplane 9 | spec: 10 | {{- if eq .Values.route.enabled true }} 11 | type: ClusterIP 12 | {{- else if eq .Values.loadbalancer.enabled true }} 13 | type: LoadBalancer 14 | {{- if .Values.loadbalancer.ip }} 15 | loadBalancerIP: {{ .Values.loadbalancer.ip }} 16 | {{- end }} 17 | {{- else if eq .Values.nodeport.enabled true }} 18 | type: NodePort 19 | {{- else }} 20 | type: ClusterIP 21 | {{- end }} 22 | selector: 23 | app: multicluster-controlplane 24 | ports: 25 | - name: app 26 | protocol: TCP 27 | targetPort: 9443 28 | port: 443 29 | {{- if eq .Values.nodeport.enabled true }} 30 | nodePort: {{ .Values.nodeport.port }} 31 | {{- end }} 32 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: multicluster-controlplane-sa 5 | namespace: {{ .Release.Namespace }} 6 | -------------------------------------------------------------------------------- /charts/multicluster-controlplane/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for multicluster-controlplane. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 
4 | 5 | image: quay.io/open-cluster-management/multicluster-controlplane:latest 6 | imagePullPolicy: IfNotPresent 7 | 8 | replicas: 1 9 | 10 | features: "DefaultClusterSet=true,ManagedClusterAutoApproval=true" 11 | 12 | autoApprovalBootstrapUsers: "" 13 | 14 | # TODO: should add restriction while enable selfmanagement 15 | enableSelfManagement: false 16 | selfManagementClusterName: "" 17 | 18 | enableDelegatingAuthentication: false 19 | 20 | apiserver: 21 | externalHostname: "" 22 | externalPort: 443 23 | ca: "" 24 | cakey: "" 25 | generateCA: false 26 | etcd: 27 | mode: "embed" 28 | snapshotCount: 5000 29 | servers: [] 30 | ca: "" 31 | cert: "" 32 | certkey: "" 33 | 34 | pvc: 35 | storageCapacity: 1Gi 36 | storageClassName: "" 37 | selector: {} 38 | 39 | route: 40 | enabled: false 41 | # for route, we may have more config to support in the future 42 | 43 | loadbalancer: 44 | enabled: false 45 | ip: "" 46 | 47 | nodeport: 48 | enabled: false 49 | port: 30443 50 | 51 | containerSecurityContext: 52 | allowPrivilegeEscalation: false 53 | capabilities: 54 | drop: 55 | - ALL 56 | privileged: false 57 | runAsNonRoot: true 58 | seccompProfile: 59 | type: RuntimeDefault 60 | 61 | securityContext: 62 | -------------------------------------------------------------------------------- /cmd/server/main.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package main 4 | 5 | import ( 6 | "fmt" 7 | "os" 8 | 9 | "github.com/spf13/cobra" 10 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 11 | utilfeature "k8s.io/apiserver/pkg/util/feature" 12 | "k8s.io/component-base/cli" 13 | logsapi "k8s.io/component-base/logs/api/v1" 14 | 15 | "open-cluster-management.io/multicluster-controlplane/pkg/cmd/agent" 16 | "open-cluster-management.io/multicluster-controlplane/pkg/cmd/controller" 17 | ) 18 | 19 | func init() { 20 | 
utilruntime.Must(logsapi.AddFeatureGates(utilfeature.DefaultMutableFeatureGate)) // register log to featuregate 21 | } 22 | 23 | func main() { 24 | command := newControlPlaneCommand() 25 | os.Exit(cli.Run(command)) 26 | } 27 | 28 | func newControlPlaneCommand() *cobra.Command { 29 | cmd := &cobra.Command{ 30 | Use: "controlplane", 31 | Short: "Start a multicluster controlplane", 32 | Run: func(cmd *cobra.Command, args []string) { 33 | if err := cmd.Help(); err != nil { 34 | fmt.Fprintf(os.Stderr, "%v\n", err) 35 | } 36 | os.Exit(1) 37 | }, 38 | } 39 | 40 | cmd.AddCommand(controller.NewController()) 41 | cmd.AddCommand(agent.NewAgent()) 42 | 43 | return cmd 44 | } 45 | -------------------------------------------------------------------------------- /hack/check/check-copyright.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright Contributors to the Open Cluster Management project 3 | 4 | # TESTED ON MAC! 5 | 6 | # NOTE: When running against a node repo, delete the node_modules directories first! Then npm ci once all the 7 | # copyright changes are incorporated. 8 | 9 | # set -x 10 | TMP_FILE="tmp_file" 11 | 12 | ALL_FILES=$(git ls-files | \ 13 | grep -v -f <(sed 's/\([.|]\)/\\\1/g; s/\?/./g ; s/\*/.*/g' .copyrightignore)) 14 | 15 | COMMUNITY_COPY_HEADER_FILE="$PWD/hack/check/copyright-header.txt" 16 | 17 | if [ ! -f $COMMUNITY_COPY_HEADER_FILE ]; then 18 | echo "File $COMMUNITY_COPY_HEADER_FILE not found!" 
19 | exit 1 20 | fi 21 | 22 | COMMUNITY_COPY_HEADER_STRING=$(cat $COMMUNITY_COPY_HEADER_FILE) 23 | 24 | echo "Desired copyright header is: $COMMUNITY_COPY_HEADER_STRING" 25 | 26 | # NOTE: Only use one newline or javascript and typescript linter/prettier will complain about the extra blank lines 27 | NEWLINE="\n" 28 | 29 | ERROR=false 30 | 31 | for FILE in $ALL_FILES 32 | do 33 | if [[ -d $FILE ]] ; then 34 | continue 35 | fi 36 | 37 | COMMENT_START="# " 38 | COMMENT_END="" 39 | 40 | if [[ $FILE == *".go" ]]; then 41 | COMMENT_START="// " 42 | fi 43 | 44 | if [[ $FILE == *".ts" || $FILE == *".tsx" || $FILE == *".js" ]]; then 45 | COMMENT_START="/* " 46 | COMMENT_END=" */" 47 | fi 48 | 49 | if [[ $FILE == *".md" ]]; then 50 | COMMENT_START="\[comment\]: # ( " 51 | COMMENT_END=" )" 52 | fi 53 | 54 | if [[ $FILE == *".html" ]]; then 55 | COMMENT_START="" 57 | fi 58 | 59 | if [[ $FILE == *".go" \ 60 | || $FILE == *".yaml" \ 61 | || $FILE == *".yml" \ 62 | || $FILE == *".sh" \ 63 | || $FILE == *".js" \ 64 | || $FILE == *".ts" \ 65 | || $FILE == *".tsx" \ 66 | || $FILE == *"Dockerfile" \ 67 | || $FILE == *"Makefile" \ 68 | || $FILE == *".mk" \ 69 | || $FILE == *"Dockerfile.prow" \ 70 | || $FILE == *"Makefile.prow" \ 71 | || $FILE == *".gitignore" \ 72 | || $FILE == *".md" ]]; then 73 | 74 | COMMUNITY_HEADER_AS_COMMENT="$COMMENT_START$COMMUNITY_COPY_HEADER_STRING$COMMENT_END" 75 | 76 | if [ -f "$FILE" ] && ! 
grep -q "$COMMUNITY_HEADER_AS_COMMENT" "$FILE"; then 77 | echo "FILE: $FILE:" 78 | echo -e "\t- Need to add Community copyright header to file" 79 | ERROR=true 80 | fi 81 | fi 82 | done 83 | 84 | if $ERROR == true 85 | then 86 | exit 1 87 | fi 88 | rm -f $TMP_FILE 89 | -------------------------------------------------------------------------------- /hack/check/copyright-header.txt: -------------------------------------------------------------------------------- 1 | Copyright Contributors to the Open Cluster Management project -------------------------------------------------------------------------------- /hack/crd-update/copy-crds.sh: -------------------------------------------------------------------------------- 1 | # Copyright Contributors to the Open Cluster Management project 2 | #!/bin/bash 3 | 4 | source "$(dirname "${BASH_SOURCE}")/init.sh" 5 | 6 | for f in $HUB_CRD_FILES 7 | do 8 | cp $f ./pkg/controllers/bootstrap/crds/ 9 | done 10 | 11 | for f in $SPOKE_CRD_FILES 12 | do 13 | cp $f ./pkg/agent/crds/ 14 | done 15 | -------------------------------------------------------------------------------- /hack/crd-update/init.sh: -------------------------------------------------------------------------------- 1 | # Copyright Contributors to the Open Cluster Management project 2 | #!/bin/bash 3 | 4 | set -o errexit 5 | set -o nounset 6 | set -o pipefail 7 | 8 | HUB_CRD_FILES="./vendor/open-cluster-management.io/api/cluster/v1/*.crd.yaml 9 | ./vendor/open-cluster-management.io/api/addon/v1alpha1/*.crd.yaml 10 | ./vendor/open-cluster-management.io/api/cluster/v1beta2/0000_01_clusters.open-cluster-management.io_managedclustersetbindings.crd.yaml 11 | ./vendor/open-cluster-management.io/api/cluster/v1beta2/0000_00_clusters.open-cluster-management.io_managedclustersets.crd.yaml 12 | ./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_02_clusters.open-cluster-management.io_placements.crd.yaml 13 | 
./vendor/open-cluster-management.io/api/cluster/v1beta1/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml 14 | ./vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_05_clusters.open-cluster-management.io_addonplacementscores.crd.yaml 15 | ./vendor/open-cluster-management.io/api/work/v1/0000_00_work.open-cluster-management.io_manifestworks.crd.yaml 16 | ./vendor/open-cluster-management.io/api/work/v1alpha1/0000_00_work.open-cluster-management.io_manifestworkreplicasets.crd.yaml 17 | " 18 | 19 | SPOKE_CRD_FILES="./vendor/open-cluster-management.io/api/cluster/v1alpha1/0000_02_clusters.open-cluster-management.io_clusterclaims.crd.yaml 20 | ./vendor/open-cluster-management.io/api/work/v1/0000_01_work.open-cluster-management.io_appliedmanifestworks.crd.yaml 21 | " 22 | -------------------------------------------------------------------------------- /hack/demo/README.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project ) 2 | # Get started 3 | 4 | ## Prerequisites 5 | 1. Connect to an OpenShift cluster 6 | 2. Install the latest [clusteradm](https://github.com/open-cluster-management-io/clusteradm#install-the-clusteradm-command-line) 7 | 3. 
Install the latest [kustomize](https://kubectl.docs.kubernetes.io/installation/kustomize/binaries/) 8 | 9 | ## Install 10 | 11 | ```bash 12 | ./next-generation.sh 2 (the number of control planes) 13 | ``` 14 | 15 | ## Clean up 16 | 17 | ```bash 18 | ./next-generation.sh 2 clean 19 | ``` -------------------------------------------------------------------------------- /hack/demo/next-generation.sh: -------------------------------------------------------------------------------- 1 | # Copyright Contributors to the Open Cluster Management project 2 | #!/usr/bin/env bash 3 | 4 | ################################# 5 | # include the -=magic=- 6 | # you can pass command line args 7 | # 8 | # example: 9 | # to disable simulated typing 10 | # . ../demo-magic.sh -d 11 | # 12 | # pass -h to see all options 13 | ################################# 14 | . ./demo-magic.sh 15 | 16 | 17 | ######################## 18 | # Configure the options 19 | ######################## 20 | 21 | # 22 | # speed at which to simulate typing. bigger num = faster 23 | # 24 | TYPE_SPEED=40 25 | 26 | # 27 | # custom prompt 28 | # 29 | # see http://www.tldp.org/HOWTO/Bash-Prompt-HOWTO/bash-prompt-escape-sequences.html for escape sequences 30 | # 31 | DEMO_PROMPT="${GREEN}➜ ${CYAN}\W ${COLOR_RESET}" 32 | ROOT_DIR="$(pwd)" 33 | number=${1:-$1} 34 | 35 | # this is needed for the controlplane deploy 36 | echo "* Testing connection" 37 | HOST_URL=$(oc -n openshift-console get routes console -o jsonpath='{.status.ingress[0].routerCanonicalHostname}') 38 | if [ $? 
-ne 0 ]; then 39 | echo "ERROR: Make sure you are logged into an OpenShift Container Platform before running this script" 40 | exit 41 | fi 42 | 43 | # shorten to the basedomain 44 | DEFAULT_HOST_POSTFIX=${HOST_URL/#router-default./} 45 | HOST_POSTFIX=${HOST_POSTFIX:-$DEFAULT_HOST_POSTFIX} 46 | 47 | if [[ "$2" == "clean" ]]; then 48 | for i in $(seq 1 "${number}"); do 49 | namespace=multicluster-controlplane-$i 50 | oc delete ns $namespace 51 | kind delete cluster --name $namespace-mc1 52 | rm -rf ${ROOT_DIR}/../deploy/cert-${namespace} 53 | done 54 | oc delete -k multicluster-global-hub-lite/deploy/server -n default 55 | rm -rf multicluster-global-hub-lite 56 | exit 57 | fi 58 | 59 | # text color 60 | # DEMO_CMD_COLOR=$BLACK 61 | 62 | # hide the evidence 63 | clear 64 | 65 | for i in $(seq 1 "${number}"); do 66 | 67 | # put your demo awesomeness here 68 | namespace=multicluster-controlplane-$i 69 | p "deploy standalone controlplane and addons(workmgr and managedserviceaccount) in namespace ${namespace}" 70 | export HUB_NAME="${namespace}" 71 | API_HOST="multicluster-controlplane-${HUB_NAME}.${HOST_POSTFIX}" 72 | pei "cd ../.. 
&& make deploy-all" 73 | cd ${ROOT_DIR} 74 | pei "oc get pod -n ${namespace}" 75 | 76 | CERTS_DIR=${ROOT_DIR}/../deploy/cert-${namespace} 77 | p "create a KinD cluster as a managedcluster" 78 | pei "kind create cluster --name $namespace-mc1 --kubeconfig ${CERTS_DIR}/mc1-kubeconfig" 79 | 80 | output=$(clusteradm --kubeconfig=${CERTS_DIR}/kubeconfig get token --use-bootstrap-token) 81 | token=$(echo $output | awk -F ' ' '{print $1}' | awk -F '=' '{print $2}') 82 | p "join to the control plane" 83 | pei "clusteradm --kubeconfig=${CERTS_DIR}/mc1-kubeconfig join --hub-token $token --hub-apiserver https://$API_HOST --cluster-name $namespace-mc1" 84 | PROMPT_TIMEOUT=10 85 | wait 86 | pei "clusteradm --kubeconfig=${CERTS_DIR}/kubeconfig accept --clusters $namespace-mc1" 87 | 88 | pei "oc --kubeconfig=${CERTS_DIR}/kubeconfig get managedcluster" 89 | 90 | PROMPT_TIMEOUT=10 91 | wait 92 | pei "oc --kubeconfig=${CERTS_DIR}/kubeconfig get managedclusteraddon -n $namespace-mc1" 93 | 94 | done 95 | 96 | # show a prompt so as not to reveal our true nature after 97 | # the demo has concluded 98 | 99 | p "deploy the global hub in default namespace" 100 | rm -rf multicluster-global-hub-lite 101 | git clone git@github.com:clyang82/multicluster-global-hub-lite.git 102 | pei "cd multicluster-global-hub-lite && make deploy && cd .." 
103 | 104 | for i in $(seq 1 "${number}"); do 105 | 106 | namespace=multicluster-controlplane-$i 107 | p "deploy syncer into namespace ${namespace}" 108 | oc create secret generic multicluster-global-hub-kubeconfig --from-file=kubeconfig=multicluster-global-hub-lite/deploy/server/certs/kube-aggregator.kubeconfig -n ${namespace} 109 | pei "oc apply -n ${namespace} -k multicluster-global-hub-lite/deploy/syncer" 110 | 111 | done 112 | 113 | cp multicluster-global-hub-lite/deploy/server/certs/kube-aggregator.kubeconfig /tmp/global-hub-kubeconfig 114 | p "Use oc --kubeconfig /tmp/global-hub-kubeconfig to access the global hub" 115 | 116 | p "" -------------------------------------------------------------------------------- /hack/deploy-etcd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/.." ; pwd -P)" 4 | 5 | set -o nounset 6 | set -o pipefail 7 | set -o errexit 8 | 9 | source ${REPO_DIR}/hack/lib/deps.sh 10 | 11 | check_kubectl 12 | check_kustomize 13 | check_cfssl 14 | 15 | SED=sed 16 | if [ "$(uname)" = 'Darwin' ]; then 17 | # run `brew install gnu-${SED}` to install gsed 18 | SED=gsed 19 | fi 20 | 21 | ETCD_NS=${ETCD_NS:-"multicluster-controlplane-etcd"} 22 | ETCD_IMAGE_NAME=${ETCD_IMAGE_NAME:-"quay.io/coreos/etcd"} 23 | REUSE_CA=${REUSE_CA:-false} 24 | STORAGE_CLASS_NAME=${STORAGE_CLASS_NAME:-""} 25 | 26 | deploy_dir=${REPO_DIR}/_output/etcd/deploy 27 | cert_dir=${deploy_dir}/cert-etcd 28 | 29 | echo "Deploy etcd on the namespace ${ETCD_NS} in the cluster ${KUBECONFIG}" 30 | mkdir -p ${cert_dir} 31 | cp -r ${REPO_DIR}/hack/deploy/etcd/* $deploy_dir 32 | 33 | kubectl delete ns ${ETCD_NS} --ignore-not-found 34 | kubectl create ns ${ETCD_NS} 35 | 36 | if [ "${REUSE_CA}" != true ]; then 37 | pushd $cert_dir 38 | echo '{"CN":"multicluster-controlplane","key":{"algo":"rsa","size":2048}}' | cfssl gencert -initca - | cfssljson -bare ca - 39 | echo 
'{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","server auth","client auth"]}}}' > ca-config.json 40 | echo '{"CN":"'client'","hosts":[""],"key":{"algo":"rsa","size":2048}}' | cfssl gencert -config=ca-config.json -ca=ca.pem -ca-key=ca-key.pem -hostname="" - | cfssljson -bare client 41 | popd 42 | fi 43 | 44 | if [ "$STORAGE_CLASS_NAME" != "gp2" ]; then 45 | ${SED} -i "s/gp2/${STORAGE_CLASS_NAME}/g" $deploy_dir/statefulset.yaml 46 | fi 47 | 48 | pushd $deploy_dir 49 | kustomize edit set namespace ${ETCD_NS} 50 | kustomize edit set image quay.io/coreos/etcd=${ETCD_IMAGE_NAME} 51 | popd 52 | 53 | kustomize build ${deploy_dir} | kubectl apply -f - 54 | 55 | wait_seconds="60"; until [[ $((wait_seconds--)) -eq 0 ]] || eval "kubectl -n ${ETCD_NS} get pod etcd-0 &> /dev/null" ; do sleep 1; done 56 | kubectl -n ${ETCD_NS} wait pod/etcd-0 --for condition=Ready --timeout=180s 57 | 58 | wait_seconds="60"; until [[ $((wait_seconds--)) -eq 0 ]] || eval "kubectl -n ${ETCD_NS} get pod etcd-1 &> /dev/null" ; do sleep 1; done 59 | kubectl -n ${ETCD_NS} wait pod/etcd-1 --for condition=Ready --timeout=180s 60 | 61 | wait_seconds="60"; until [[ $((wait_seconds--)) -eq 0 ]] || eval "kubectl -n ${ETCD_NS} get pod etcd-2 &> /dev/null" ; do sleep 1; done 62 | kubectl -n ${ETCD_NS} wait pod/etcd-2 --for condition=Ready --timeout=180s 63 | 64 | echo "wait for etcd health (timeout=180s) ..." 65 | wait_seconds="180"; until [[ $((wait_seconds--)) -eq 0 ]] || eval "kubectl -n ${ETCD_NS} exec etcd-0 -- etcdctl cluster-health &> /dev/null" ; do sleep 1; done 66 | 67 | echo "etcd is health" 68 | -------------------------------------------------------------------------------- /hack/deploy-multicluster-controlplane.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/.." 
; pwd -P)" 4 | 5 | set -o nounset 6 | set -o pipefail 7 | set -o errexit 8 | 9 | source ${REPO_DIR}/hack/lib/deps.sh 10 | 11 | check_kubectl 12 | check_helm 13 | 14 | uninstall=${1:-""} 15 | only_render=${ONLY_RENDER:-false} 16 | controlplane_namespace=${HUB_NAME} 17 | self_management=${SELF_MANAGEMENT:-false} 18 | image=${IMAGE_NAME:-"quay.io/stolostron/multicluster-controlplane:latest"} 19 | external_hostname=${EXTERNAL_HOSTNAME:-""} 20 | node_port=${NODE_PORT:-0} 21 | etcd_mod=${ETCD_MOD:-""} 22 | feature_gates=${FEATURE_GATES:-"DefaultClusterSet=true,ManagedClusterAutoApproval=true"} 23 | 24 | if [ "$uninstall"x = "uninstall"x ]; then 25 | helm -n ${HUB_NAME} uninstall multicluster-controlplane 26 | kubectl delete ns multicluster-controlplane --ignore-not-found 27 | exit 0 28 | fi 29 | 30 | echo "Deploy multicluster controlplane on the namespace $controlplane_namespace" 31 | echo "Image: $image" 32 | 33 | args="--create-namespace" 34 | args="$args --set enableSelfManagement=${self_management}" 35 | args="$args --set image=${image}" 36 | args="$args --set autoApprovalBootstrapUsers=system:admin" 37 | args="$args --set features=${feature_gates}" 38 | 39 | if [ 0 -eq $node_port ]; then 40 | args="$args --set route.enabled=true" 41 | else 42 | args="$args --set nodeport.enabled=true" 43 | args="$args --set nodeport.port=${node_port}" 44 | args="$args --set apiserver.externalHostname=${external_hostname}" 45 | args="$args --set apiserver.externalPort=${node_port}" 46 | fi 47 | 48 | if [ "$etcd_mod"x = "external"x ]; then 49 | args="$args --set etcd.mode=external" 50 | args="$args --set etcd.servers={\"http://etcd-0.etcd.multicluster-controlplane-etcd:2379\",\"http://etcd-1.etcd.multicluster-controlplane-etcd:2379\",\"http://etcd-2.etcd.multicluster-controlplane-etcd:2379\"}" 51 | args="$args --set-file etcd.ca=${REPO_DIR}/_output/etcd/deploy/cert-etcd/ca.pem" 52 | args="$args --set-file etcd.cert=${REPO_DIR}/_output/etcd/deploy/cert-etcd/client.pem" 53 | args="$args 
--set-file etcd.certkey=${REPO_DIR}/_output/etcd/deploy/cert-etcd/client-key.pem" 54 | fi 55 | 56 | if [ "${only_render}" = true ]; then 57 | mkdir -p ${REPO_DIR}/_output/controlplane 58 | deploy_file=${REPO_DIR}/_output/controlplane/${controlplane_namespace}.yaml 59 | helm template multicluster-controlplane ${REPO_DIR}/charts/multicluster-controlplane -n $controlplane_namespace $args > ${deploy_file} 60 | exit 0 61 | fi 62 | 63 | helm install multicluster-controlplane ${REPO_DIR}/charts/multicluster-controlplane -n $controlplane_namespace $args 64 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/README.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project ) 2 | # Install managed-serviceaccount add-on 3 | 4 | 1. Install manifests on the standalone cluster 5 | 6 | ```bash 7 | kubectl apply -k deploy/addon/managed-serviceaccount/hub --kubeconfig=<standalone-cluster-kubeconfig> 8 | ``` 9 | 10 | 2. 
Install manifests on the hosting cluster 11 | 12 | ```bash 13 | cd deploy/addon/managed-serviceaccount/manager && kustomize edit set namespace $HUB_NAME 14 | 15 | kubectl apply -k deploy/addon/managed-serviceaccount/manager 16 | ``` -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/hub/clustermanagementaddon/clustermanagementaddon.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: addon.open-cluster-management.io/v1alpha1 2 | kind: ClusterManagementAddOn 3 | metadata: 4 | name: managed-serviceaccount 5 | spec: 6 | addOnMeta: 7 | displayName: managed-serviceaccount 8 | description: managed-serviceaccount 9 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/hub/clustermanagementaddon/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - clustermanagementaddon.yaml 3 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/hub/crds/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - authentication.open-cluster-management.io_managedserviceaccounts.yaml 3 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/hub/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - clustermanagementaddon 3 | - crds 4 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - 
manager-deployment.yaml 6 | - role.yaml 7 | - rolebinding.yaml 8 | 9 | images: 10 | - name: quay.io/open-cluster-management/managed-serviceaccount:latest 11 | newName: quay.io/clyang82/managed-serviceaccount 12 | newTag: latest 13 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/manager/manager-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: managed-serviceaccount-addon-manager 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | open-cluster-management.io/addon: managed-serviceaccount 10 | template: 11 | metadata: 12 | labels: 13 | open-cluster-management.io/addon: managed-serviceaccount 14 | spec: 15 | containers: 16 | - name: manager 17 | image: quay.io/open-cluster-management/managed-serviceaccount:latest 18 | imagePullPolicy: Always 19 | command: 20 | - /manager 21 | args: 22 | - --leader-elect=true 23 | - --agent-image-name=quay.io/open-cluster-management/managed-serviceaccount:latest 24 | - --agent-install-all=true 25 | - --feature-gates=EphemeralIdentity=false 26 | - --kubeconfig=/var/run/secrets/hub/kubeconfig 27 | volumeMounts: 28 | - mountPath: /var/run/secrets/hub 29 | name: kubeconfig 30 | readOnly: true 31 | volumes: 32 | - name: kubeconfig 33 | secret: 34 | defaultMode: 420 35 | secretName: multicluster-controlplane-kubeconfig 36 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/manager/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: open-cluster-management:managed-serviceaccount:addon-manager 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - secrets 11 | verbs: 12 | - "*" 13 | - apiGroups: 14 | - coordination.k8s.io 15 | 
resources: 16 | - leases 17 | verbs: 18 | - "*" 19 | -------------------------------------------------------------------------------- /hack/deploy/addon/managed-serviceaccount/manager/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: open-cluster-management:managed-serviceaccount:addon-manager 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: open-cluster-management:managed-serviceaccount:addon-manager 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/README.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project ) 2 | # Install policy add-on 3 | 4 | 1. Install manifests on the standalone cluster 5 | 6 | ```bash 7 | kubectl apply -k deploy/addon/policy/hub --kubeconfig=<standalone-cluster-kubeconfig> 8 | ``` 9 | 10 | 2. Install manifests on the hosting cluster 11 | 12 | ```bash 13 | cd deploy/addon/policy/manager && kustomize edit set namespace $HUB_NAME 14 | 15 | kubectl apply -k deploy/addon/policy/manager 16 | ``` -------------------------------------------------------------------------------- /hack/deploy/addon/policy/hub/clustermanagementaddon/config-policy-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: addon.open-cluster-management.io/v1alpha1 2 | kind: ClusterManagementAddOn 3 | metadata: 4 | name: config-policy-controller 5 | spec: 6 | addOnMeta: 7 | description: Audits k8s resources and remediates violation based on configuration policies. 
8 | displayName: Config Policy Addon 9 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/hub/clustermanagementaddon/governance-policy-framework.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: addon.open-cluster-management.io/v1alpha1 2 | kind: ClusterManagementAddOn 3 | metadata: 4 | name: governance-policy-framework 5 | spec: 6 | addOnMeta: 7 | description: Distributes policies and collects policy evaluation results. 8 | displayName: Governance Policy Framework Addon 9 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/hub/clustermanagementaddon/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - config-policy-controller.yaml 3 | - governance-policy-framework.yaml 4 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/hub/crds/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - apps.open-cluster-management.io_placementrules_crd.yaml 3 | - policy.open-cluster-management.io_placementbindings.yaml 4 | - policy.open-cluster-management.io_policies.yaml 5 | - policy.open-cluster-management.io_policyautomations.yaml 6 | - policy.open-cluster-management.io_policysets.yaml 7 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/hub/crds/policy.open-cluster-management.io_placementbindings.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.6.1 8 | creationTimestamp: null 9 | name: placementbindings.policy.open-cluster-management.io 10 | spec: 11 | group: 
policy.open-cluster-management.io 12 | names: 13 | kind: PlacementBinding 14 | listKind: PlacementBindingList 15 | plural: placementbindings 16 | shortNames: 17 | - pb 18 | singular: placementbinding 19 | scope: Namespaced 20 | versions: 21 | - name: v1 22 | schema: 23 | openAPIV3Schema: 24 | description: PlacementBinding is the Schema for the placementbindings API 25 | properties: 26 | apiVersion: 27 | description: 'APIVersion defines the versioned schema of this representation 28 | of an object. Servers should convert recognized schemas to the latest 29 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 30 | type: string 31 | kind: 32 | description: 'Kind is a string value representing the REST resource this 33 | object represents. Servers may infer this from the endpoint the client 34 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 35 | type: string 36 | metadata: 37 | type: object 38 | placementRef: 39 | description: PlacementSubject defines the resource that can be used as 40 | PlacementBinding placementRef 41 | properties: 42 | apiGroup: 43 | enum: 44 | - apps.open-cluster-management.io 45 | - cluster.open-cluster-management.io 46 | minLength: 1 47 | type: string 48 | kind: 49 | enum: 50 | - PlacementRule 51 | - Placement 52 | minLength: 1 53 | type: string 54 | name: 55 | minLength: 1 56 | type: string 57 | required: 58 | - apiGroup 59 | - kind 60 | - name 61 | type: object 62 | status: 63 | description: PlacementBindingStatus defines the observed state of PlacementBinding 64 | type: object 65 | subjects: 66 | items: 67 | description: Subject defines the resource that can be used as PlacementBinding 68 | subject 69 | properties: 70 | apiGroup: 71 | enum: 72 | - policy.open-cluster-management.io 73 | minLength: 1 74 | type: string 75 | 
kind: 76 | enum: 77 | - Policy 78 | - PolicySet 79 | minLength: 1 80 | type: string 81 | name: 82 | minLength: 1 83 | type: string 84 | required: 85 | - apiGroup 86 | - kind 87 | - name 88 | type: object 89 | minItems: 1 90 | type: array 91 | required: 92 | - placementRef 93 | - subjects 94 | type: object 95 | served: true 96 | storage: true 97 | subresources: 98 | status: {} 99 | status: 100 | acceptedNames: 101 | kind: "" 102 | plural: "" 103 | conditions: [] 104 | storedVersions: [] 105 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/hub/crds/policy.open-cluster-management.io_policysets.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.6.1 8 | creationTimestamp: null 9 | name: policysets.policy.open-cluster-management.io 10 | spec: 11 | group: policy.open-cluster-management.io 12 | names: 13 | kind: PolicySet 14 | listKind: PolicySetList 15 | plural: policysets 16 | shortNames: 17 | - plcset 18 | singular: policyset 19 | scope: Namespaced 20 | versions: 21 | - additionalPrinterColumns: 22 | - jsonPath: .status.compliant 23 | name: Compliance state 24 | type: string 25 | - jsonPath: .metadata.creationTimestamp 26 | name: Age 27 | type: date 28 | name: v1beta1 29 | schema: 30 | openAPIV3Schema: 31 | description: PolicySet is the Schema for the policysets API 32 | properties: 33 | apiVersion: 34 | description: 'APIVersion defines the versioned schema of this representation 35 | of an object. Servers should convert recognized schemas to the latest 36 | internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 37 | type: string 38 | kind: 39 | description: 'Kind is a string value representing the REST resource this 40 | object represents. Servers may infer this from the endpoint the client 41 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 42 | type: string 43 | metadata: 44 | type: object 45 | spec: 46 | description: PolicySetSpec describes a group of policies that are related 47 | and can be placed on the same managed clusters. 48 | properties: 49 | description: 50 | description: Description of this PolicySet. 51 | type: string 52 | policies: 53 | description: Policies that are grouped together within the PolicySet. 54 | items: 55 | minLength: 1 56 | type: string 57 | type: array 58 | required: 59 | - policies 60 | type: object 61 | status: 62 | description: PolicySetStatus defines the observed state of PolicySet 63 | properties: 64 | compliant: 65 | type: string 66 | placement: 67 | items: 68 | description: PolicySetStatusPlacement defines a placement object 69 | for the status 70 | properties: 71 | placement: 72 | type: string 73 | placementBinding: 74 | type: string 75 | placementRule: 76 | type: string 77 | type: object 78 | type: array 79 | statusMessage: 80 | type: string 81 | type: object 82 | required: 83 | - spec 84 | type: object 85 | served: true 86 | storage: true 87 | subresources: 88 | status: {} 89 | status: 90 | acceptedNames: 91 | kind: "" 92 | plural: "" 93 | conditions: [] 94 | storedVersions: [] 95 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/hub/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - crds 3 | - clustermanagementaddon 4 | 
-------------------------------------------------------------------------------- /hack/deploy/addon/policy/manager/addon-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: grc 6 | component: ocm-policy-addon-ctrl 7 | name: grc-policy-addon-controller 8 | spec: 9 | progressDeadlineSeconds: 600 10 | replicas: 1 11 | revisionHistoryLimit: 10 12 | selector: 13 | matchLabels: 14 | app: grc 15 | strategy: 16 | rollingUpdate: 17 | maxSurge: 25% 18 | maxUnavailable: 25% 19 | type: RollingUpdate 20 | template: 21 | metadata: 22 | annotations: 23 | kubectl.kubernetes.io/default-container: manager 24 | labels: 25 | app: grc 26 | spec: 27 | affinity: 28 | nodeAffinity: 29 | requiredDuringSchedulingIgnoredDuringExecution: 30 | nodeSelectorTerms: 31 | - matchExpressions: 32 | - key: kubernetes.io/arch 33 | operator: In 34 | values: 35 | - amd64 36 | - ppc64le 37 | - s390x 38 | - arm64 39 | podAntiAffinity: 40 | preferredDuringSchedulingIgnoredDuringExecution: 41 | - podAffinityTerm: 42 | labelSelector: 43 | matchExpressions: 44 | - key: ocm-antiaffinity-selector 45 | operator: In 46 | values: 47 | - grcpolicyaddon 48 | topologyKey: topology.kubernetes.io/zone 49 | weight: 70 50 | - podAffinityTerm: 51 | labelSelector: 52 | matchExpressions: 53 | - key: ocm-antiaffinity-selector 54 | operator: In 55 | values: 56 | - grcpolicyaddon 57 | topologyKey: kubernetes.io/hostname 58 | weight: 35 59 | containers: 60 | - args: 61 | - controller 62 | command: 63 | - governance-policy-addon-controller 64 | - "--controlplane" 65 | - "--hubkubeconfig=/var/run/secrets/hub/kubeconfig" 66 | env: 67 | - name: POD_NAME 68 | valueFrom: 69 | fieldRef: 70 | apiVersion: v1 71 | fieldPath: metadata.name 72 | - name: CONFIG_POLICY_CONTROLLER_IMAGE 73 | value: quay.io/stolostron/config-policy-controller:2.7.0-SNAPSHOT-2022-11-30-23-34-51 74 | - name: KUBE_RBAC_PROXY_IMAGE 75 | 
value: registry.redhat.io/openshift4/ose-kube-rbac-proxy:v4.10 76 | - name: GOVERNANCE_POLICY_FRAMEWORK_ADDON_IMAGE 77 | value: quay.io/stolostron/governance-policy-framework-addon:2.7.0-SNAPSHOT-2022-11-30-23-34-51 78 | image: quay.io/open-cluster-management/governance-policy-addon-controller:latest 79 | imagePullPolicy: Always 80 | name: manager 81 | resources: 82 | requests: 83 | cpu: 25m 84 | memory: 64Mi 85 | securityContext: 86 | allowPrivilegeEscalation: false 87 | capabilities: 88 | drop: 89 | - ALL 90 | privileged: false 91 | readOnlyRootFilesystem: true 92 | runAsNonRoot: true 93 | terminationMessagePath: /dev/termination-log 94 | terminationMessagePolicy: File 95 | volumeMounts: 96 | - mountPath: /tmp 97 | name: tmp 98 | - mountPath: /var/run/secrets/hub 99 | name: kubeconfig 100 | readOnly: true 101 | serviceAccountName: policy-sa 102 | volumes: 103 | - emptyDir: {} 104 | name: tmp 105 | - name: kubeconfig 106 | secret: 107 | defaultMode: 420 108 | secretName: multicluster-controlplane-kubeconfig 109 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - addon-deployment.yaml 6 | - propagator-deployment.yaml 7 | - propagator-metrics-service.yaml 8 | - role.yaml 9 | - rolebinding.yaml 10 | - serviceaccount.yaml 11 | 12 | images: 13 | - name: quay.io/open-cluster-management/governance-policy-addon-controller:latest 14 | newName: quay.io/clyang82/governance-policy-addon-controller 15 | newTag: latest 16 | - name: quay.io/open-cluster-management/governance-policy-propagator:latest 17 | newName: quay.io/morvencao/governance-policy-propagator 18 | newTag: latest 19 | -------------------------------------------------------------------------------- 
/hack/deploy/addon/policy/manager/propagator-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: grc 6 | app.kubernetes.io/instance: grc 7 | app.kubernetes.io/name: grc 8 | component: ocm-policy-propagator 9 | release: grc 10 | name: grc-policy-propagator 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app: grc 16 | component: ocm-policy-propagator 17 | release: grc 18 | template: 19 | metadata: 20 | creationTimestamp: null 21 | labels: 22 | app: grc 23 | app.kubernetes.io/instance: grc 24 | app.kubernetes.io/name: grc 25 | component: ocm-policy-propagator 26 | name: governance-policy-propagator 27 | ocm-antiaffinity-selector: grcpolicypropagator 28 | release: grc 29 | spec: 30 | containers: 31 | - args: 32 | - --secure-listen-address=0.0.0.0:8443 33 | - --upstream=http://127.0.0.1:8383/ 34 | - --logtostderr=true 35 | - --v=6 36 | - --tls-cert-file=/var/run/policy-metrics-cert/tls.crt 37 | - --tls-private-key-file=/var/run/policy-metrics-cert/tls.key 38 | - --tls-min-version=VersionTLS13 39 | image: quay.io/open-cluster-management/kube-rbac-proxy:2.5.0-SNAPSHOT-2022-01-04-13-45-55 40 | imagePullPolicy: IfNotPresent 41 | name: kube-rbac-proxy 42 | ports: 43 | - containerPort: 8443 44 | name: https 45 | protocol: TCP 46 | volumeMounts: 47 | - mountPath: /var/run/policy-metrics-cert 48 | name: metrics-cert 49 | readOnly: true 50 | - command: 51 | - governance-policy-propagator 52 | - --kubeconfig=/var/run/secrets/hub/kubeconfig 53 | env: 54 | - name: WATCH_NAMESPACE 55 | - name: POD_NAME 56 | valueFrom: 57 | fieldRef: 58 | apiVersion: v1 59 | fieldPath: metadata.name 60 | - name: OPERATOR_NAME 61 | value: governance-policy-propagator 62 | image: quay.io/open-cluster-management/governance-policy-propagator:latest 63 | imagePullPolicy: IfNotPresent 64 | livenessProbe: 65 | exec: 66 | command: 67 | - ls 68 | failureThreshold: 3 
69 | initialDelaySeconds: 15 70 | periodSeconds: 15 71 | successThreshold: 1 72 | timeoutSeconds: 1 73 | name: governance-policy-propagator 74 | readinessProbe: 75 | exec: 76 | command: 77 | - ls 78 | failureThreshold: 3 79 | initialDelaySeconds: 15 80 | periodSeconds: 15 81 | successThreshold: 1 82 | timeoutSeconds: 1 83 | resources: 84 | requests: 85 | cpu: 25m 86 | memory: 64Mi 87 | securityContext: 88 | allowPrivilegeEscalation: false 89 | capabilities: 90 | drop: 91 | - ALL 92 | privileged: false 93 | readOnlyRootFilesystem: true 94 | runAsNonRoot: true 95 | volumeMounts: 96 | - mountPath: /tmp 97 | name: tmp 98 | - mountPath: /var/run/secrets/hub 99 | name: kubeconfig 100 | readOnly: true 101 | securityContext: 102 | runAsNonRoot: true 103 | serviceAccountName: policy-sa 104 | tolerations: 105 | - effect: NoSchedule 106 | key: node-role.kubernetes.io/infra 107 | operator: Exists 108 | volumes: 109 | - emptyDir: {} 110 | name: tmp 111 | - name: metrics-cert 112 | secret: 113 | defaultMode: 420 114 | secretName: grc-metrics-cert 115 | - name: kubeconfig 116 | secret: 117 | defaultMode: 420 118 | secretName: multicluster-controlplane-kubeconfig 119 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/manager/propagator-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | service.beta.openshift.io/serving-cert-secret-name: grc-metrics-cert 6 | labels: 7 | app: grc 8 | app.kubernetes.io/instance: grc 9 | app.kubernetes.io/name: grc 10 | chart: grc-chart-2.7.0 11 | component: ocm-policy-propagator 12 | release: grc 13 | name: grc-policy-propagator-metrics 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | protocol: TCP 19 | targetPort: 8443 20 | selector: 21 | app: grc 22 | component: ocm-policy-propagator 23 | release: grc 24 | sessionAffinity: None 25 | type: ClusterIP 26 | 
-------------------------------------------------------------------------------- /hack/deploy/addon/policy/manager/role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: open-cluster-management:policy-controller:addon-manager 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/manager/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: open-cluster-management:policy-controller:addon-manager 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: open-cluster-management:policy-controller:addon-manager 9 | subjects: 10 | - kind: ServiceAccount 11 | name: policy-sa 12 | -------------------------------------------------------------------------------- /hack/deploy/addon/policy/manager/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: policy-sa 5 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/README.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project ) 2 | # 
Install work-manager add-on 3 | 4 | 1. Install manifests on the standalone cluster 5 | 6 | ```bash 7 | kubectl apply -k deploy/addon/work-manager/hub --kubeconfig=<standalone-cluster-kubeconfig> 8 | ``` 9 | 10 | 2. Install manifests on the hosting cluster 11 | 12 | ```bash 13 | cd deploy/addon/work-manager/manager && kustomize edit set namespace $HUB_NAME 14 | kubectl apply -k deploy/addon/work-manager/manager 15 | ``` -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/hub/agent-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: managed-cluster-workmgr 5 | rules: 6 | - apiGroups: ["","events.k8s.io"] 7 | resources: ["events"] 8 | verbs: ["create", "update", "patch"] 9 | - apiGroups: ["action.open-cluster-management.io"] 10 | resources: ["managedclusteractions"] 11 | verbs: ["get", "list", "watch"] 12 | - apiGroups: ["action.open-cluster-management.io"] 13 | resources: ["managedclusteractions/status"] 14 | verbs: ["update", "patch"] 15 | - apiGroups: ["internal.open-cluster-management.io"] 16 | resources: ["managedclusterinfos"] 17 | verbs: ["get", "list", "watch"] 18 | - apiGroups: ["internal.open-cluster-management.io"] 19 | resources: ["managedclusterinfos/status"] 20 | verbs: ["update", "patch"] 21 | - apiGroups: ["view.open-cluster-management.io"] 22 | resources: ["managedclusterviews"] 23 | verbs: ["get", "list", "watch"] 24 | - apiGroups: ["view.open-cluster-management.io"] 25 | resources: ["managedclusterviews/status"] 26 | verbs: ["update", "patch"] 27 | - apiGroups: ["proxy.open-cluster-management.io"] 28 | resources: ["clusterstatuses/aggregator"] 29 | verbs: ["get", "create"] 30 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/hub/clustermanagementaddon/clustermanagementaddon.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: addon.open-cluster-management.io/v1alpha1 2 | kind: ClusterManagementAddOn 3 | metadata: 4 | name: work-manager 5 | spec: 6 | addOnMeta: 7 | displayName: work-manager 8 | description: "work-manager provides action, view and rbac settings" 9 | supportedConfigs: 10 | - group: addon.open-cluster-management.io 11 | resource: addondeploymentconfigs 12 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/hub/clustermanagementaddon/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - clustermanagementaddon.yaml 3 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/hub/crds/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - action.open-cluster-management.io_managedclusteractions.crd.yaml 3 | - hive.openshift.io_clusterclaims.yaml 4 | - hive.openshift.io_clusterdeployments.yaml 5 | - hive.openshift.io_clusterpools.yaml 6 | - hive.openshift.io_syncsets.yaml 7 | - hiveinternal.openshift.io_clustersyncs.yaml 8 | - imageregistry.open-cluster-management.io_managedclusterimageregistries.crd.yaml 9 | - internal.open-cluster-management.io_managedclusterinfos.crd.yaml 10 | - view.open-cluster-management.io_managedclusterviews.crd.yaml 11 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/hub/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - clustermanagementaddon 3 | - agent-clusterrole.yaml 4 | 5 | bases: 6 | - crds -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/manager/controller.yaml: 
-------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | name: work-manager-addon-manager 5 | labels: 6 | app: work-manager 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: work-manager 12 | template: 13 | metadata: 14 | labels: 15 | app: work-manager 16 | spec: 17 | containers: 18 | - name: work-manager 19 | image: quay.io/stolostron/multicloud-manager 20 | imagePullPolicy: Always 21 | args: 22 | - "/controller" 23 | - "--enable-agent-deploy=true" 24 | - "--kubeconfig=/var/run/secrets/hub/kubeconfig" 25 | - "--agent-addon-image=quay.io/stolostron/multicloud-manager:2.7.0-SNAPSHOT-2022-11-30-23-34-51" 26 | livenessProbe: 27 | httpGet: 28 | path: /healthz 29 | port: 8000 30 | failureThreshold: 3 31 | periodSeconds: 10 32 | readinessProbe: 33 | httpGet: 34 | path: /readyz 35 | port: 8000 36 | failureThreshold: 3 37 | periodSeconds: 10 38 | volumeMounts: 39 | - mountPath: /var/run/secrets/hub 40 | name: kubeconfig 41 | readOnly: true 42 | volumes: 43 | - name: kubeconfig 44 | secret: 45 | defaultMode: 420 46 | secretName: multicluster-controlplane-kubeconfig 47 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - controller.yaml 6 | - role.yaml 7 | - rolebinding.yaml 8 | 9 | 10 | images: 11 | - name: quay.io/stolostron/multicloud-manager 12 | newName: quay.io/clyang82/multicloud-manager 13 | newTag: latest 14 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/manager/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | 
name: open-cluster-management:work-manger:addon-manager 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - secrets 11 | verbs: 12 | - "*" 13 | - apiGroups: 14 | - coordination.k8s.io 15 | resources: 16 | - leases 17 | verbs: 18 | - "*" 19 | -------------------------------------------------------------------------------- /hack/deploy/addon/work-manager/manager/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: open-cluster-management:work-manger:addon-manager 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: open-cluster-management:work-manger:addon-manager 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | -------------------------------------------------------------------------------- /hack/deploy/agent/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | #TODO only apply this role for controlplane when self management enabled 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: open-cluster-management:multicluster-controlplane-agent 6 | rules: 7 | # Allow agent to manage crds 8 | - apiGroups: ["apiextensions.k8s.io"] 9 | resources: ["customresourcedefinitions"] 10 | verbs: ["create", "get", "list", "update", "watch", "patch", "delete"] 11 | # Allow agent to get/list/watch nodes 12 | # list nodes to calculates the capacity and allocatable resources of the managed cluster 13 | - apiGroups: [""] 14 | resources: ["nodes"] 15 | verbs: ["get", "list", "watch"] 16 | # Allow agent to list clusterclaims 17 | - apiGroups: ["cluster.open-cluster-management.io"] 18 | resources: ["clusterclaims"] 19 | verbs: ["get", "list", "watch"] 20 | # Allow agent to create/update/patch/delete namespaces, get/list/watch are contained in admin role already 21 | - apiGroups: [""] 22 | resources: 
["namespaces"] 23 | verbs: ["create", "update", "patch", "delete"] 24 | # Allow agent to manage role/rolebinding/clusterrole/clusterrolebinding 25 | - apiGroups: ["rbac.authorization.k8s.io"] 26 | resources: ["clusterrolebindings", "rolebindings"] 27 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 28 | - apiGroups: ["rbac.authorization.k8s.io"] 29 | resources: ["clusterroles", "roles"] 30 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete", "escalate", "bind"] 31 | # Allow OCM addons to setup metrics collection with Prometheus 32 | # TODO: Move this permission to the open-cluster-management:{{ .KlusterletName }}-work:execution Role (not ClusterRole) 33 | # when it is created. 34 | - apiGroups: ["monitoring.coreos.com"] 35 | resources: ["servicemonitors"] 36 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 37 | # Allow agent to manage oauth clients 38 | # TODO refactor permission control of work agent to remove this 39 | - apiGroups: ["oauth.openshift.io"] 40 | resources: ["oauthclients"] 41 | verbs: ["get", "list", "watch", "create", "patch","update", "delete"] 42 | # Allow agent to manage appliedmanifestworks 43 | - apiGroups: ["work.open-cluster-management.io"] 44 | resources: ["appliedmanifestworks"] 45 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"] 46 | - apiGroups: ["work.open-cluster-management.io"] 47 | resources: ["appliedmanifestworks/status"] 48 | verbs: ["patch", "update"] 49 | - apiGroups: ["work.open-cluster-management.io"] 50 | resources: ["appliedmanifestworks/finalizers"] 51 | verbs: ["update"] 52 | # Allow agent to check executor permissions 53 | - apiGroups: ["authorization.k8s.io"] 54 | resources: ["subjectaccessreviews"] 55 | verbs: ["create"] 56 | - apiGroups: [""] 57 | resources: ["serviceaccounts"] 58 | verbs: ["impersonate"] 59 | - apiGroups: [""] 60 | resources: ["serviceaccounts", "serviceaccounts/token"] 61 | verbs: ["get", "watch", "list", 
"create", "delete"] 62 | - apiGroups: ["authentication.k8s.io"] 63 | resources: ["tokenreviews"] 64 | verbs: ["create"] -------------------------------------------------------------------------------- /hack/deploy/agent/clusterrolebinding-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: open-cluster-management:multicluster-controlplane-agent:work-execution-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | # We deploy a controller that could work with permission lower than cluster-admin, the tradeoff is 9 | # responsivity because list/watch cannot be maintained over too many namespaces. 10 | name: admin 11 | subjects: 12 | - kind: ServiceAccount 13 | name: multicluster-controlplane-agent-sa 14 | namespace: multicluster-controlplane-agent 15 | -------------------------------------------------------------------------------- /hack/deploy/agent/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: open-cluster-management:multicluster-controlplane-agent 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: open-cluster-management:multicluster-controlplane-agent 9 | subjects: 10 | - kind: ServiceAccount 11 | name: multicluster-controlplane-agent-sa 12 | namespace: multicluster-controlplane-agent 13 | -------------------------------------------------------------------------------- /hack/deploy/agent/deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | name: multicluster-controlplane-agent 5 | labels: 6 | app: multicluster-controlplane-agent 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: 
multicluster-controlplane-agent 12 | template: 13 | metadata: 14 | labels: 15 | app: multicluster-controlplane-agent 16 | spec: 17 | serviceAccountName: multicluster-controlplane-agent-sa 18 | containers: 19 | - name: agent 20 | image: quay.io/open-cluster-management/multicluster-controlplane 21 | imagePullPolicy: IfNotPresent 22 | args: 23 | - "/multicluster-controlplane" 24 | - "agent" 25 | - "--cluster-name=loopback" 26 | - "--bootstrap-kubeconfig=/spoke/bootstrap/kubeconfig" 27 | - "--feature-gates=ManagedServiceAccount=true" 28 | securityContext: 29 | allowPrivilegeEscalation: false 30 | capabilities: 31 | drop: 32 | - ALL 33 | privileged: false 34 | runAsNonRoot: true 35 | volumeMounts: 36 | - name: bootstrap-kubeconfig 37 | mountPath: "/spoke/bootstrap" 38 | readOnly: true 39 | - name: hub-kubeconfig 40 | mountPath: "/spoke/hub-kubeconfig" 41 | volumes: 42 | - name: bootstrap-kubeconfig 43 | secret: 44 | secretName: bootstrap-kubeconfig 45 | - name: hub-kubeconfig 46 | emptyDir: 47 | medium: Memory 48 | -------------------------------------------------------------------------------- /hack/deploy/agent/kustomization.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Value of this field is prepended to the 3 | # names of all resources, e.g. a deployment named 4 | # "wordpress" becomes "alices-wordpress". 5 | # Note that it should also match with the prefix (text before '-') of the namespace 6 | # field above. 7 | #namePrefix: multicloud- 8 | 9 | # Labels to add to all resources and selectors. 10 | #commonLabels: 11 | # someName: someValue 12 | 13 | # Each entry in this list must resolve to an existing 14 | # resource definition in YAML. These are the resource 15 | # files that kustomize reads, modifies and emits as a 16 | # YAML string, with resources separated by document 17 | # markers ("---"). 
18 | # 19 | # General rule here is anything deployed by OLM bundles should go here as well, 20 | # this is used in "make deploy" for developers and should mimic what OLM deploys 21 | # for you. CRDs are an exception to this as we don't want to have to list them all 22 | # here. These are deployed via a "make install" dependency. 23 | apiVersion: kustomize.config.k8s.io/v1beta1 24 | kind: Kustomization 25 | 26 | resources: 27 | - clusterrole.yaml 28 | - clusterrolebinding-admin.yaml 29 | - clusterrolebinding.yaml 30 | - role.yaml 31 | - rolebinding.yaml 32 | - serviceaccount.yaml 33 | - deployment.yaml 34 | 35 | images: 36 | - name: quay.io/open-cluster-management/multicluster-controlplane 37 | newName: quay.io/open-cluster-management/multicluster-controlplane 38 | newTag: latest 39 | 40 | secretGenerator: 41 | - name: bootstrap-kubeconfig 42 | namespace: multicluster-controlplane-agent 43 | files: 44 | - kubeconfig=hub-kubeconfig 45 | type: "Opaque" 46 | 47 | generatorOptions: 48 | disableNameSuffixHash: true 49 | -------------------------------------------------------------------------------- /hack/deploy/agent/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: multicluster-controlplane-agent 5 | rules: 6 | # create hub-kubeconfig and external-managed-registration/work secrets 7 | - apiGroups: [""] 8 | resources: ["secrets"] 9 | verbs: ["get", "list", "watch", "create", "delete", "update", "patch"] 10 | -------------------------------------------------------------------------------- /hack/deploy/agent/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | # Copyright Contributors to the Open Cluster Management project 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: multicluster-controlplane-agent 6 | roleRef: 7 | kind: Role 8 | name: 
multicluster-controlplane-agent 9 | apiGroup: rbac.authorization.k8s.io 10 | subjects: 11 | - kind: ServiceAccount 12 | name: multicluster-controlplane-agent-sa 13 | -------------------------------------------------------------------------------- /hack/deploy/agent/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: multicluster-controlplane-agent-sa 5 | -------------------------------------------------------------------------------- /hack/deploy/etcd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - statefulsetservice.yaml 6 | - statefulset.yaml 7 | 8 | secretGenerator: 9 | - name: trusted-ca 10 | files: 11 | - cert-etcd/ca.pem 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | -------------------------------------------------------------------------------- /hack/deploy/etcd/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: "etcd" 5 | labels: 6 | component: "etcd" 7 | spec: 8 | serviceName: "etcd" 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | component: "etcd" 13 | template: 14 | metadata: 15 | name: "etcd" 16 | labels: 17 | component: "etcd" 18 | spec: 19 | containers: 20 | - name: "etcd" 21 | image: "quay.io/coreos/etcd:v3.2.3" 22 | ports: 23 | - containerPort: 2379 24 | name: client 25 | - containerPort: 2380 26 | name: peer 27 | env: 28 | - name: CLUSTER_SIZE 29 | value: "3" 30 | - name: SET_NAME 31 | value: "etcd" 32 | - name: NAMESPACE 33 | valueFrom: 34 | fieldRef: 35 | fieldPath: metadata.namespace 36 | - name: ETCD_SNAPSHOT_COUNT 37 | value: "5000" 38 | command: 39 | - "/bin/sh" 40 | - "-ecx" 41 | - | 42 | IP=$(hostname -i) 43 | for i in $(seq 0 
$((${CLUSTER_SIZE} - 1))); do 44 | while true; do 45 | echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up" 46 | ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME}.${NAMESPACE}.svc > /dev/null && break 47 | sleep 1s 48 | done 49 | done 50 | PEERS="" 51 | for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do 52 | PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}.${NAMESPACE}.svc:2380" 53 | done 54 | # start etcd. If cluster is already initialized the `--initial-*` options will be ignored. 55 | exec etcd --name ${HOSTNAME} \ 56 | --listen-peer-urls http://${IP}:2380 \ 57 | --listen-client-urls http://0.0.0.0:2379 \ 58 | --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \ 59 | --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \ 60 | --initial-cluster-token etcd-cluster-1 \ 61 | --initial-cluster ${PEERS} \ 62 | --initial-cluster-state new \ 63 | --data-dir=/var/run/etcd/${IP}/${NAMESPACE}.etcd \ 64 | --client-cert-auth \ 65 | --trusted-ca-file=/client-ca/ca.pem 66 | volumeMounts: 67 | - name: trusted 68 | mountPath: /client-ca 69 | readOnly: true 70 | - mountPath: /var/run/etcd 71 | name: multicluster-config 72 | volumes: 73 | - name: trusted 74 | secret: 75 | secretName: trusted-ca 76 | volumeClaimTemplates: 77 | - metadata: 78 | name: multicluster-config 79 | spec: 80 | storageClassName: gp2 81 | accessModes: 82 | - "ReadWriteOnce" 83 | resources: 84 | requests: 85 | storage: 1Gi 86 | -------------------------------------------------------------------------------- /hack/deploy/etcd/statefulsetservice.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: "etcd" 5 | annotations: 6 | # Create endpoints also if the related pod isn't ready 7 | service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" 8 | spec: 9 | ports: 10 | - port: 2379 11 | name: client 12 | - port: 2380 13 | name: peer 14 | clusterIP: None 15 | selector: 16 
| component: "etcd" 17 | -------------------------------------------------------------------------------- /hack/install-etcd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright 2014 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | # This script is convenience to download and install etcd in third_party. 18 | # Mostly just used by CI. 19 | # Usage: `hack/install-etcd.sh`. 20 | 21 | set -o errexit 22 | set -o nounset 23 | set -o pipefail 24 | 25 | KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 26 | source "${KUBE_ROOT}/hack/lib/init.sh" 27 | 28 | kube::etcd::install 29 | -------------------------------------------------------------------------------- /hack/lib/.gitattributes: -------------------------------------------------------------------------------- 1 | version.sh export-subst 2 | -------------------------------------------------------------------------------- /hack/lib/deps.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | bin_dir="/usr/bin" 4 | 5 | function check_golang() { 6 | export PATH=$PATH:/usr/local/go/bin 7 | if ! 
command -v go >/dev/null 2>&1; then 8 | wget https://dl.google.com/go/go1.21.7.linux-amd64.tar.gz >/dev/null 2>&1 9 | tar -C /usr/local/ -xvf go1.21.7.linux-amd64.tar.gz >/dev/null 2>&1 10 | rm go1.21.7.linux-amd64.tar.gz 11 | fi 12 | if [[ $(go version) < "go version go1.21" ]]; then 13 | echo "go version is less than 1.21, update to 1.21" 14 | rm -rf /usr/local/go 15 | wget https://dl.google.com/go/go1.21.7.linux-amd64.tar.gz >/dev/null 2>&1 16 | tar -C /usr/local/ -xvf go1.21.7.linux-amd64.tar.gz >/dev/null 2>&1 17 | rm go1.21.7.linux-amd64.tar.gz 18 | sleep 2 19 | fi 20 | echo "go path: $(which go)" 21 | echo "go version: $(go version)" 22 | } 23 | 24 | function check_kind() { 25 | if ! command -v kind >/dev/null 2>&1; then 26 | curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.14.0/kind-linux-amd64 27 | chmod +x ./kind 28 | mv ./kind ${bin_dir}/kind 29 | fi 30 | if [[ $(kind version |awk '{print $2}') < "v0.12.0" ]]; then 31 | rm -rf $(which kind) 32 | curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.14.0/kind-linux-amd64 33 | chmod +x ./kind 34 | mv ./kind ${bin_dir}/kind 35 | fi 36 | echo "kind path: $(which kind)" 37 | echo "kind version: $(kind version)" 38 | } 39 | 40 | function check_kubectl() { 41 | if ! command -v kubectl >/dev/null 2>&1; then 42 | echo "This script will install kubectl (https://kubernetes.io/docs/tasks/tools/install-kubectl/) on your machine" 43 | if [[ "$(uname)" == "Linux" ]]; then 44 | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl 45 | elif [[ "$(uname)" == "Darwin" ]]; then 46 | curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/darwin/amd64/kubectl 47 | fi 48 | chmod +x ./kubectl 49 | mv ./kubectl ${bin_dir}/kubectl 50 | fi 51 | echo "kubectl path: $(which kubectl)" 52 | echo "kubectl version: $(kubectl version --client --short)" 53 | } 54 | 55 | function check_kustomize() { 56 | if ! 
command -v kustomize >/dev/null 2>&1; then 57 | echo "This script will install kustomize on your machine" 58 | curl -LO "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" 59 | chmod +x ./install_kustomize.sh 60 | source ./install_kustomize.sh 4.5.7 ${bin_dir} 61 | rm ./install_kustomize.sh 62 | fi 63 | echo "kustomize path: $(which kustomize)" 64 | echo "kustomize version: $(kustomize version)" 65 | } 66 | 67 | function check_ginkgo() { 68 | if ! command -v ginkgo >/dev/null 2>&1; then 69 | go install github.com/onsi/ginkgo/v2/ginkgo@v2.5.0 70 | mv $(go env GOPATH)/bin/ginkgo ${bin_dir}/ginkgo 71 | fi 72 | echo "ginkgo path: $(which ginkgo)" 73 | echo "ginkgo version: $(ginkgo version)" 74 | } 75 | 76 | function check_cfssl() { 77 | if ! command -v cfssl >/dev/null 2>&1; then 78 | curl --retry 10 -L -o cfssl https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssl_1.5.0_linux_amd64 79 | chmod +x cfssl || true 80 | mv cfssl ${bin_dir}/cfssl 81 | fi 82 | echo "cfssl path: $(which cfssl)" 83 | if ! command -v cfssljson >/dev/null 2>&1; then 84 | curl --retry 10 -L -o cfssljson https://github.com/cloudflare/cfssl/releases/download/v1.5.0/cfssljson_1.5.0_linux_amd64 85 | chmod +x cfssljson || true 86 | mv cfssljson ${bin_dir}/cfssljson 87 | fi 88 | echo "cfssljson path: $(which cfssljson)" 89 | } 90 | 91 | function check_helm() { 92 | if ! 
command -v helm >/dev/null 2>&1; then 93 | wget -q https://get.helm.sh/helm-v3.11.3-linux-amd64.tar.gz 94 | tar -xf helm-v3.11.3-linux-amd64.tar.gz 95 | mkdir -p helm 96 | tar -C helm -xf helm-v3.11.3-linux-amd64.tar.gz 97 | chmod +x helm/linux-amd64/helm 98 | mv helm/linux-amd64/helm ${bin_dir}/helm 99 | fi 100 | echo "helm path: $(which helm)" 101 | echo "helm version: $(helm version --short)" 102 | } 103 | -------------------------------------------------------------------------------- /hack/lib/init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | # Unset CDPATH so that path interpolation can work correctly 8 | # https://github.com/kubernetes/kubernetes/issues/52255 9 | unset CDPATH 10 | 11 | # Until all GOPATH references are removed from all build scripts as well, 12 | # explicitly disable module mode to avoid picking up user-set GO111MODULE preferences. 13 | # As individual scripts (like hack/update-vendor.sh) make use of go modules, 14 | # they can explicitly set GO111MODULE=on 15 | export GO111MODULE=off 16 | 17 | # The root of the build/dist directory 18 | KUBE_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd -P)" 19 | 20 | source "${KUBE_ROOT}/hack/lib/util.sh" 21 | source "${KUBE_ROOT}/hack/lib/logging.sh" 22 | 23 | kube::log::install_errexit 24 | kube::util::ensure-bash-version 25 | 26 | source "${KUBE_ROOT}/hack/lib/etcd.sh" 27 | -------------------------------------------------------------------------------- /hack/lib/logging.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Controls verbosity of the script output and logging. 4 | KUBE_VERBOSE="${KUBE_VERBOSE:-2}" 5 | 6 | # Handler for when we exit automatically on an error. 
7 | # Borrowed from https://gist.github.com/ahendrix/7030300 8 | kube::log::errexit() { 9 | local err="${PIPESTATUS[*]}" 10 | 11 | # If the shell we are in doesn't have errexit set (common in subshells) then 12 | # don't dump stacks. 13 | set +o | grep -qe "-o errexit" || return 14 | 15 | set +o xtrace 16 | local code="${1:-1}" 17 | # Print out the stack trace described by $function_stack 18 | if [ ${#FUNCNAME[@]} -gt 2 ] 19 | then 20 | kube::log::error "Call tree:" 21 | for ((i=1;i<${#FUNCNAME[@]}-1;i++)) 22 | do 23 | kube::log::error " ${i}: ${BASH_SOURCE[${i}+1]}:${BASH_LINENO[${i}]} ${FUNCNAME[${i}]}(...)" 24 | done 25 | fi 26 | kube::log::error_exit "Error in ${BASH_SOURCE[1]}:${BASH_LINENO[0]}. '${BASH_COMMAND}' exited with status ${err}" "${1:-1}" 1 27 | } 28 | 29 | kube::log::install_errexit() { 30 | # trap ERR to provide an error handler whenever a command exits nonzero this 31 | # is a more verbose version of set -o errexit 32 | trap 'kube::log::errexit' ERR 33 | 34 | # setting errtrace allows our ERR trap handler to be propagated to functions, 35 | # expansions and subshells 36 | set -o errtrace 37 | } 38 | 39 | # Log an error but keep going. Don't dump the stack or exit. 40 | kube::log::error() { 41 | timestamp=$(date +"[%m%d %H:%M:%S]") 42 | echo "!!! ${timestamp} ${1-}" >&2 43 | shift 44 | for message; do 45 | echo " ${message}" >&2 46 | done 47 | } 48 | 49 | # Print a status line. Formatted to show up in a stream of output. 50 | kube::log::status() { 51 | local V="${V:-0}" 52 | if [[ ${KUBE_VERBOSE} < ${V} ]]; then 53 | return 54 | fi 55 | 56 | timestamp=$(date +"[%m%d %H:%M:%S]") 57 | echo "+++ ${timestamp} ${1}" 58 | shift 59 | for message; do 60 | echo " ${message}" 61 | done 62 | } 63 | 64 | # Print an usage message to stderr. The arguments are printed directly. 
65 | kube::log::usage() { 66 | echo >&2 67 | local message 68 | for message; do 69 | echo "${message}" >&2 70 | done 71 | echo >&2 72 | } 73 | 74 | # Print out some info that isn't a top level status line 75 | kube::log::info() { 76 | local V="${V:-0}" 77 | if [[ ${KUBE_VERBOSE} < ${V} ]]; then 78 | return 79 | fi 80 | 81 | for message; do 82 | echo "${message}" 83 | done 84 | } 85 | 86 | # Log an error and exit. 87 | # Args: 88 | # $1 Message to log with the error 89 | # $2 The error code to return 90 | # $3 The number of stack frames to skip when printing. 91 | kube::log::error_exit() { 92 | local message="${1:-}" 93 | local code="${2:-1}" 94 | local stack_skip="${3:-0}" 95 | stack_skip=$((stack_skip + 1)) 96 | 97 | if [[ ${KUBE_VERBOSE} -ge 4 ]]; then 98 | local source_file=${BASH_SOURCE[${stack_skip}]} 99 | local source_line=${BASH_LINENO[$((stack_skip - 1))]} 100 | echo "!!! Error in ${source_file}:${source_line}" >&2 101 | [[ -z ${1-} ]] || { 102 | echo " ${1}" >&2 103 | } 104 | 105 | kube::log::stack ${stack_skip} 106 | 107 | echo "Exiting with status ${code}" >&2 108 | fi 109 | 110 | exit "${code}" 111 | } 112 | -------------------------------------------------------------------------------- /hack/start-multicluster-controlplane.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Copyright Contributors to the Open Cluster Management project 3 | 4 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/.." 
; pwd -P)" 5 | 6 | GO_OUT=${GO_OUT:-"${REPO_DIR}/bin"} 7 | 8 | WAIT_FOR_URL_API_SERVER=${WAIT_FOR_URL_API_SERVER:-60} 9 | MAX_TIME_FOR_URL_API_SERVER=${MAX_TIME_FOR_URL_API_SERVER:-1} 10 | 11 | CONFIG_DIR=${CONFIG_DIR:-"${REPO_DIR}/_output/controlplane"} 12 | DATA_DIR=${DATA_DIR:-"${REPO_DIR}/_output/controlplane/.ocm"} 13 | 14 | CONTROLPLANE_PORT=${CONTROLPLANE_PORT:-"9443"} 15 | ETCD_MODE=${ETCD_MODE:-"embed"} 16 | BOOTSTRAP_USERS=${BOOTSTRAP_USERS:-""} 17 | FEATURE_GATES=${FEATURE_GATES:-"DefaultClusterSet=true,ManagedClusterAutoApproval=true"} 18 | 19 | # Stop right away if the build fails 20 | set -e 21 | 22 | source "${REPO_DIR}/hack/lib/init.sh" 23 | 24 | # Shut down anyway if there's an error. 25 | set +e 26 | 27 | LOG_DIR=${LOG_DIR:-"/tmp"} 28 | APISERVER_LOG=${LOG_DIR}/kube-apiserver.log 29 | 30 | mkdir -p ${DATA_DIR} 31 | chmod 700 ${DATA_DIR} 32 | 33 | function test_apiserver_off { 34 | # For the common local scenario, fail fast if server is already running. 35 | # this can happen if you run start-multicluster-controlplane.sh twice and kill etcd in between. 36 | if ! curl --silent -k -g "127.0.0.1:${CONTROLPLANE_PORT}" ; then 37 | echo "API SERVER secure port is free, proceeding..." 38 | else 39 | echo "ERROR starting API SERVER, exiting. Some process is serving already on ${CONTROLPLANE_PORT}" 40 | exit 1 41 | fi 42 | } 43 | 44 | function cleanup() { 45 | echo "Cleaning up..." 46 | # Check if the API server is still running 47 | [[ -n "${APISERVER_PID-}" ]] && kube::util::read-array APISERVER_PIDS < <(pgrep -P "${APISERVER_PID}" ; ps -o pid= -p "${APISERVER_PID}") 48 | [[ -n "${APISERVER_PIDS-}" ]] && kill "${APISERVER_PIDS[@]}" 2>/dev/null 49 | exit 0 50 | } 51 | 52 | function healthcheck { 53 | if [[ -n "${APISERVER_PID-}" ]] && ! 
kill -0 "${APISERVER_PID}" 2>/dev/null; then 54 | warning_log "API server terminated unexpectedly, see ${APISERVER_LOG}" 55 | APISERVER_PID= 56 | fi 57 | } 58 | 59 | function print_color { 60 | message=$1 61 | prefix=${2:+$2: } # add colon only if defined 62 | color=${3:-1} # default is red 63 | echo -n "$(tput bold)$(tput setaf "${color}")" 64 | echo "${prefix}${message}" 65 | echo -n "$(tput sgr0)" 66 | } 67 | 68 | function warning_log { 69 | print_color "$1" "W$(date "+%m%d %H:%M:%S")]" 1 70 | } 71 | 72 | function start_etcd { 73 | echo "etcd starting..." 74 | export ETCD_LOGFILE=${LOG_DIR}/etcd.log 75 | kube::etcd::start 76 | } 77 | 78 | function start_apiserver { 79 | "${GO_OUT}/multicluster-controlplane" \ 80 | "server" \ 81 | --controlplane-config-dir="${CONFIG_DIR}" \ 82 | --auto-approved-csr-users="${BOOTSTRAP_USERS}" \ 83 | --feature-gates="${FEATURE_GATES}" >"${APISERVER_LOG}" 2>&1 & 84 | 85 | APISERVER_PID=$! 86 | 87 | echo "Waiting for apiserver to come up" 88 | kube::util::wait_for_url "https://127.0.0.1:${CONTROLPLANE_PORT}/healthz" "apiserver: " 1 "${WAIT_FOR_URL_API_SERVER}" "${MAX_TIME_FOR_URL_API_SERVER}" \ 89 | || { echo "check apiserver logs: ${APISERVER_LOG}" ; exit 1 ; } 90 | 91 | echo "use 'kubectl --kubeconfig=${DATA_DIR}/cert/kube-aggregator.kubeconfig' to access the controlplane" 92 | echo "$APISERVER_PID" > "${REPO_DIR}/_output/controlplane/controlplane_pid" 93 | chmod a+r ${DATA_DIR}/cert/kube-aggregator.kubeconfig 94 | } 95 | 96 | ############################################################################### 97 | if [ ! 
-f "${CONFIG_DIR}/ocmconfig.yaml" ] ; then 98 | cat > "${CONFIG_DIR}/ocmconfig.yaml" < signers, where fileName is the filename of a CA bundle 19 | // where PEM certificates should be stored 20 | fileBundles map[string][][]string 21 | } 22 | 23 | func NewCertificateChains(signers ...CertificateSignerBuilder) CertificateChainsBuilder { 24 | return &certificateChains{ 25 | signers: signers, 26 | 27 | fileBundles: make(map[string][][]string), 28 | } 29 | } 30 | 31 | func (cs *certificateChains) WithSigners(signers ...CertificateSignerBuilder) CertificateChainsBuilder { 32 | cs.signers = append(cs.signers, signers...) 33 | return cs 34 | } 35 | 36 | func (cs *certificateChains) WithCABundle(bundlePath string, signerNames ...[]string) CertificateChainsBuilder { 37 | cs.fileBundles[bundlePath] = signerNames 38 | return cs 39 | } 40 | 41 | func (cs *certificateChains) Complete(cfg *SigningConfig) (*CertificateChains, error) { 42 | completeChains := &CertificateChains{ 43 | signers: make(map[string]*CertificateSigner), 44 | SigningConfig: cfg, 45 | } 46 | 47 | // Library-go crypto package warns via stderr prints about CA 48 | // and cert validity time when they exceed 5 and 2 years 49 | // respectively. This is not configurable and the introduction 50 | // of such a possibility involves changing the API in a massively 51 | // used library accross OpenShift. Temporarily disable stderr as 52 | // a shortcut to clean logs. 
53 | newstderr, err := os.Open("/dev/null") 54 | if err == nil { 55 | originalStderr := os.Stderr 56 | os.Stderr = newstderr 57 | defer newstderr.Close() 58 | defer func() { 59 | os.Stderr = originalStderr 60 | }() 61 | } 62 | 63 | for _, signer := range cs.signers { 64 | signer := signer 65 | if _, ok := completeChains.signers[signer.Name()]; ok { 66 | return nil, fmt.Errorf("signer name clash: %s", signer.Name()) 67 | } 68 | 69 | completedSigner, err := signer.Complete() 70 | if err != nil { 71 | return nil, fmt.Errorf("failed to complete signer %q: %w", signer.Name(), err) 72 | } 73 | completeChains.signers[completedSigner.signerName] = completedSigner 74 | } 75 | 76 | for bundle, signers := range cs.fileBundles { 77 | for _, s := range signers { 78 | signerObj := completeChains.GetSigner(s...) 79 | if signerObj == nil { 80 | return nil, NewSignerNotFound(fmt.Sprintf("%v", s)) 81 | } 82 | 83 | if err := signerObj.AddToBundles(bundle); err != nil { 84 | return nil, fmt.Errorf("failed adding the signer %q to CA bundle %q: %v", signerObj.signerName, bundle, err) 85 | } 86 | } 87 | } 88 | 89 | return completeChains, nil 90 | } 91 | -------------------------------------------------------------------------------- /pkg/certificate/certchains/doc.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package certchains 3 | 4 | /* 5 | the certchains package is a modified copy of "github.com/openshift/microshift/pkg/util/cryptomaterial", 6 | see original package in https://github.com/openshift/microshift/tree/main/pkg/util/cryptomaterial/certchains 7 | */ 8 | -------------------------------------------------------------------------------- /pkg/certificate/certchains/errors.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package certchains 3 | 4 | import ( 5 |
"errors" 6 | "fmt" 7 | ) 8 | 9 | var _ error = &SignerNotFound{} 10 | 11 | type SignerNotFound struct { 12 | name string 13 | } 14 | 15 | func NewSignerNotFound(signerName string) *SignerNotFound { 16 | return &SignerNotFound{ 17 | name: signerName, 18 | } 19 | } 20 | 21 | func (e *SignerNotFound) Error() string { 22 | return fmt.Sprintf("signer %q was not found", e.name) 23 | } 24 | 25 | func IsSignerNotFoundError(err error) bool { 26 | var t *SignerNotFound 27 | return errors.As(err, &t) 28 | } 29 | -------------------------------------------------------------------------------- /pkg/certificate/certchains/util.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package certchains 3 | 4 | import ( 5 | "crypto/x509" 6 | "path/filepath" 7 | "time" 8 | ) 9 | 10 | const ( 11 | CACertFileName = "ca.crt" 12 | CAKeyFileName = "ca.key" 13 | CABundleFileName = "ca-bundle.crt" 14 | CASerialsFileName = "serial.txt" 15 | ServerCertFileName = "server.crt" 16 | ServerKeyFileName = "server.key" 17 | ClientCertFileName = "client.crt" 18 | ClientKeyFileName = "client.key" 19 | PeerCertFileName = "peer.crt" 20 | PeerKeyFileName = "peer.key" 21 | 22 | LongLivedCertificateValidityDays = 365 * 10 23 | ShortLivedCertificateValidityDays = 365 24 | ) 25 | 26 | func IsCertShortLived(c *x509.Certificate) bool { 27 | totalTime := c.NotAfter.Sub(c.NotBefore) 28 | 29 | // certs under 5 years are considered short-lived 30 | return totalTime < 5*365*time.Hour*24 31 | } 32 | 33 | func CACertPath(dir string) string { return filepath.Join(dir, CACertFileName) } 34 | func CAKeyPath(dir string) string { return filepath.Join(dir, CAKeyFileName) } 35 | func CASerialsPath(dir string) string { return filepath.Join(dir, CASerialsFileName) } 36 | 37 | func CABundlePath(dir string) string { return filepath.Join(dir, CABundleFileName) } 38 | 39 | func ClientCertPath(dir string) string { return 
filepath.Join(dir, ClientCertFileName) } 40 | func ClientKeyPath(dir string) string { return filepath.Join(dir, ClientKeyFileName) } 41 | 42 | func ServingCertPath(dir string) string { return filepath.Join(dir, ServerCertFileName) } 43 | func ServingKeyPath(dir string) string { return filepath.Join(dir, ServerKeyFileName) } 44 | 45 | func PeerCertPath(dir string) string { return filepath.Join(dir, PeerCertFileName) } 46 | func PeerKeyPath(dir string) string { return filepath.Join(dir, PeerKeyFileName) } 47 | -------------------------------------------------------------------------------- /pkg/cmd/agent/agent.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package agent 3 | 4 | import ( 5 | "context" 6 | 7 | "github.com/spf13/cobra" 8 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 9 | "k8s.io/apiserver/pkg/server" 10 | "k8s.io/klog/v2" 11 | ocmfeature "open-cluster-management.io/api/feature" 12 | "open-cluster-management.io/ocm/pkg/features" 13 | 14 | "open-cluster-management.io/multicluster-controlplane/pkg/agent" 15 | mcfeature "open-cluster-management.io/multicluster-controlplane/pkg/feature" 16 | ) 17 | 18 | func init() { 19 | utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeRegistrationFeatureGates)) 20 | utilruntime.Must(features.SpokeMutableFeatureGate.Add(ocmfeature.DefaultSpokeWorkFeatureGates)) 21 | utilruntime.Must(features.SpokeMutableFeatureGate.Add(mcfeature.DefaultControlPlaneAgentFeatureGates)) 22 | } 23 | 24 | func NewAgent() *cobra.Command { 25 | agentOptions := agent.NewAgentOptions(). 
26 | WithWorkloadSourceDriverConfig("/spoke/hub-kubeconfig/kubeconfig") 27 | 28 | cmd := &cobra.Command{ 29 | Use: "agent", 30 | Short: "Start a Multicluster Controlplane Agent", 31 | RunE: func(cmd *cobra.Command, args []string) error { 32 | shutdownCtx, cancel := context.WithCancel(context.TODO()) 33 | 34 | shutdownHandler := server.SetupSignalHandler() 35 | go func() { 36 | defer cancel() 37 | <-shutdownHandler 38 | klog.Infof("Received SIGTERM or SIGINT signal, shutting down agent.") 39 | }() 40 | 41 | ctx, terminate := context.WithCancel(shutdownCtx) 42 | defer terminate() 43 | 44 | go func() { 45 | klog.Info("starting the controlplane agent") 46 | if err := agentOptions.RunAgent(ctx); err != nil { 47 | klog.Fatalf("failed to run agent, %v", err) 48 | } 49 | }() 50 | 51 | if err := agentOptions.RunAddOns(ctx); err != nil { 52 | return err 53 | } 54 | 55 | <-ctx.Done() 56 | return nil 57 | }, 58 | } 59 | 60 | flags := cmd.Flags() 61 | features.SpokeMutableFeatureGate.AddFlag(flags) 62 | agentOptions.AddFlags(flags) 63 | return cmd 64 | } 65 | -------------------------------------------------------------------------------- /pkg/cmd/controller/controller.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package controller 3 | 4 | import ( 5 | "fmt" 6 | 7 | "github.com/spf13/cobra" 8 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 9 | genericapiserver "k8s.io/apiserver/pkg/server" 10 | utilfeature "k8s.io/apiserver/pkg/util/feature" 11 | cliflag "k8s.io/component-base/cli/flag" 12 | logsapi "k8s.io/component-base/logs/api/v1" 13 | "k8s.io/component-base/version/verflag" 14 | ocmfeature "open-cluster-management.io/api/feature" 15 | "open-cluster-management.io/ocm/pkg/features" 16 | 17 | mcfeature "open-cluster-management.io/multicluster-controlplane/pkg/feature" 18 | "open-cluster-management.io/multicluster-controlplane/pkg/servers" 19 | 
"open-cluster-management.io/multicluster-controlplane/pkg/servers/options" 20 | "open-cluster-management.io/multicluster-controlplane/pkg/util" 21 | ) 22 | 23 | func init() { 24 | utilruntime.Must(features.HubMutableFeatureGate.Add(ocmfeature.DefaultHubWorkFeatureGates)) 25 | utilruntime.Must(features.HubMutableFeatureGate.Add(ocmfeature.DefaultHubRegistrationFeatureGates)) 26 | utilruntime.Must(features.HubMutableFeatureGate.Add(ocmfeature.DefaultHubAddonManagerFeatureGates)) 27 | utilruntime.Must(features.HubMutableFeatureGate.Add(mcfeature.DefaultControlPlaneFeatureGates)) 28 | } 29 | 30 | func NewController() *cobra.Command { 31 | options := options.NewServerRunOptions() 32 | cmd := &cobra.Command{ 33 | Use: "server", 34 | Short: "Start a Multicluster Controlplane Server", 35 | RunE: func(cmd *cobra.Command, args []string) error { 36 | verflag.PrintAndExitIfRequested() 37 | cliflag.PrintFlags(cmd.Flags()) 38 | 39 | if err := logsapi.ValidateAndApply(options.Logs, utilfeature.DefaultFeatureGate); err != nil { 40 | return err 41 | } 42 | 43 | stopChan := genericapiserver.SetupSignalHandler() 44 | if err := options.Complete(stopChan); err != nil { 45 | return err 46 | } 47 | 48 | if err := options.Validate(); err != nil { 49 | return err 50 | } 51 | 52 | return servers.NewServer(*options).Start(util.GoContext(stopChan)) 53 | }, 54 | Args: func(cmd *cobra.Command, args []string) error { 55 | for _, arg := range args { 56 | if len(arg) > 0 { 57 | return fmt.Errorf("%q does not take any arguments, got %q", cmd.CommandPath(), args) 58 | } 59 | } 60 | return nil 61 | }, 62 | } 63 | 64 | features.HubMutableFeatureGate.AddFlag(cmd.Flags()) 65 | options.AddFlags(cmd.Flags()) 66 | return cmd 67 | } 68 | -------------------------------------------------------------------------------- /pkg/controllers/addons/managedserviceaccount.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management 
project 2 | 3 | package addons 4 | 5 | import ( 6 | "context" 7 | 8 | "open-cluster-management.io/managed-serviceaccount/pkg/addon/commoncontroller" 9 | 10 | ctrl "sigs.k8s.io/controller-runtime" 11 | ) 12 | 13 | func SetupManagedServiceAccountWithManager(ctx context.Context, mgr ctrl.Manager) error { 14 | ctrl := commoncontroller.NewEphemeralIdentityReconciler(mgr.GetCache(), mgr.GetClient()) 15 | if err := ctrl.SetupWithManager(mgr); err != nil { 16 | return err 17 | } 18 | return nil 19 | } 20 | -------------------------------------------------------------------------------- /pkg/controllers/bootstrap/crds/0000_03_clusters.open-cluster-management.io_placementdecisions.crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: placementdecisions.cluster.open-cluster-management.io 5 | spec: 6 | group: cluster.open-cluster-management.io 7 | names: 8 | kind: PlacementDecision 9 | listKind: PlacementDecisionList 10 | plural: placementdecisions 11 | singular: placementdecision 12 | preserveUnknownFields: false 13 | scope: Namespaced 14 | versions: 15 | - name: v1beta1 16 | schema: 17 | openAPIV3Schema: 18 | description: |- 19 | PlacementDecision indicates a decision from a placement. 20 | PlacementDecision must have a cluster.open-cluster-management.io/placement={placement name} label to reference a certain placement. 21 | 22 | 23 | If a placement has spec.numberOfClusters specified, the total number of decisions contained in 24 | the status.decisions of PlacementDecisions must be the same as NumberOfClusters. Otherwise, the 25 | total number of decisions must equal the number of ManagedClusters that 26 | match the placement requirements. 27 | 28 | 29 | Some of the decisions might be empty when there are not enough ManagedClusters to meet the placement requirements. 
30 | properties: 31 | apiVersion: 32 | description: |- 33 | APIVersion defines the versioned schema of this representation of an object. 34 | Servers should convert recognized schemas to the latest internal value, and 35 | may reject unrecognized values. 36 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 37 | type: string 38 | kind: 39 | description: |- 40 | Kind is a string value representing the REST resource this object represents. 41 | Servers may infer this from the endpoint the client submits requests to. 42 | Cannot be updated. 43 | In CamelCase. 44 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 45 | type: string 46 | metadata: 47 | type: object 48 | status: 49 | description: Status represents the current status of the PlacementDecision 50 | properties: 51 | decisions: 52 | description: |- 53 | Decisions is a slice of decisions according to a placement 54 | The number of decisions should not be larger than 100 55 | items: 56 | description: |- 57 | ClusterDecision represents a decision from a placement 58 | An empty ClusterDecision indicates it is not scheduled yet. 59 | properties: 60 | clusterName: 61 | description: |- 62 | ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all 63 | placement decisions for the Placement. 64 | type: string 65 | reason: 66 | description: Reason represents the reason why the ManagedCluster 67 | is selected. 
68 | type: string 69 | required: 70 | - clusterName 71 | - reason 72 | type: object 73 | type: array 74 | required: 75 | - decisions 76 | type: object 77 | type: object 78 | served: true 79 | storage: true 80 | subresources: 81 | status: {} 82 | status: 83 | acceptedNames: 84 | kind: "" 85 | plural: "" 86 | conditions: [] 87 | storedVersions: [] 88 | -------------------------------------------------------------------------------- /pkg/controllers/controller.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package controllers 4 | 5 | import aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" 6 | 7 | type Controller func(stopCh <-chan struct{}, aggregatorConfig *aggregatorapiserver.Config) error 8 | -------------------------------------------------------------------------------- /pkg/controllers/kubecontroller/bootstrap.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2016 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package kubecontroller 18 | 19 | import ( 20 | "context" 21 | "fmt" 22 | 23 | "k8s.io/controller-manager/controller" 24 | "k8s.io/kubernetes/pkg/controller/bootstrap" 25 | ) 26 | 27 | func startBootstrapSignerController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) { 28 | bsc, err := bootstrap.NewSigner( 29 | controllerContext.ClientBuilder.ClientOrDie("bootstrap-signer"), 30 | controllerContext.InformerFactory.Core().V1().Secrets(), 31 | controllerContext.InformerFactory.Core().V1().ConfigMaps(), 32 | bootstrap.DefaultSignerOptions(), 33 | ) 34 | if err != nil { 35 | return nil, true, fmt.Errorf("error creating BootstrapSigner controller: %v", err) 36 | } 37 | go bsc.Run(ctx) 38 | return nil, true, nil 39 | } 40 | 41 | func startTokenCleanerController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) { 42 | tcc, err := bootstrap.NewTokenCleaner( 43 | controllerContext.ClientBuilder.ClientOrDie("token-cleaner"), 44 | controllerContext.InformerFactory.Core().V1().Secrets(), 45 | bootstrap.DefaultTokenCleanerOptions(), 46 | ) 47 | if err != nil { 48 | return nil, true, fmt.Errorf("error creating TokenCleaner controller: %v", err) 49 | } 50 | go tcc.Run(ctx) 51 | return nil, true, nil 52 | } 53 | -------------------------------------------------------------------------------- /pkg/controllers/kubecontroller/config/config.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package config 3 | 4 | import ( 5 | clientset "k8s.io/client-go/kubernetes" 6 | restclient "k8s.io/client-go/rest" 7 | "k8s.io/client-go/tools/record" 8 | kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config" 9 | ) 10 | 11 | // Config is the main context object for the controller manager. 
12 | type Config struct { 13 | ComponentConfig kubectrlmgrconfig.KubeControllerManagerConfiguration 14 | 15 | // the general kube client 16 | Client *clientset.Clientset 17 | 18 | // the rest config for the master 19 | Kubeconfig *restclient.Config 20 | 21 | EventBroadcaster record.EventBroadcaster 22 | EventRecorder record.EventRecorder 23 | } 24 | 25 | type completedConfig struct { 26 | *Config 27 | } 28 | 29 | // CompletedConfig same as Config, just to swap private object. 30 | type CompletedConfig struct { 31 | // Embed a private pointer that cannot be instantiated outside of this package. 32 | *completedConfig 33 | } 34 | 35 | // Complete fills in any fields not set that are required to have valid data. It's mutating the receiver. 36 | func (c *Config) Complete() *CompletedConfig { 37 | cc := completedConfig{c} 38 | return &CompletedConfig{&cc} 39 | } 40 | -------------------------------------------------------------------------------- /pkg/controllers/kubecontroller/import_known_versions.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2016 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package app imports the API groups that the client will support 18 | // TODO: Remove this file when namespace controller and garbage collector 19 | // stops using legacyscheme.Registry.RESTMapper() 20 | package kubecontroller 21 | 22 | import ( 23 | // These imports are the API groups the client will support. 24 | _ "k8s.io/kubernetes/pkg/apis/authentication/install" 25 | _ "k8s.io/kubernetes/pkg/apis/authorization/install" 26 | _ "k8s.io/kubernetes/pkg/apis/certificates/install" 27 | _ "k8s.io/kubernetes/pkg/apis/core/install" 28 | _ "k8s.io/kubernetes/pkg/apis/events/install" 29 | _ "k8s.io/kubernetes/pkg/apis/rbac/install" 30 | ) 31 | -------------------------------------------------------------------------------- /pkg/controllers/kubecontroller/options/garbagecollectorcontroller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package options 18 | 19 | import ( 20 | "github.com/spf13/pflag" 21 | 22 | garbagecollectorconfig "k8s.io/kubernetes/pkg/controller/garbagecollector/config" 23 | ) 24 | 25 | // GarbageCollectorControllerOptions holds the GarbageCollectorController options. 
26 | type GarbageCollectorControllerOptions struct { 27 | *garbagecollectorconfig.GarbageCollectorControllerConfiguration 28 | } 29 | 30 | // AddFlags adds flags related to GarbageCollectorController for controller manager to the specified FlagSet. 31 | func (o *GarbageCollectorControllerOptions) AddFlags(fs *pflag.FlagSet) { 32 | if o == nil { 33 | return 34 | } 35 | 36 | fs.Int32Var(&o.ConcurrentGCSyncs, "concurrent-gc-syncs", o.ConcurrentGCSyncs, "The number of garbage collector workers that are allowed to sync concurrently.") 37 | fs.BoolVar(&o.EnableGarbageCollector, "enable-garbage-collector", o.EnableGarbageCollector, "Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.") 38 | } 39 | 40 | // ApplyTo fills up GarbageCollectorController config with options. 41 | func (o *GarbageCollectorControllerOptions) ApplyTo(cfg *garbagecollectorconfig.GarbageCollectorControllerConfiguration) error { 42 | if o == nil { 43 | return nil 44 | } 45 | 46 | cfg.ConcurrentGCSyncs = o.ConcurrentGCSyncs 47 | cfg.GCIgnoredResources = o.GCIgnoredResources 48 | cfg.EnableGarbageCollector = o.EnableGarbageCollector 49 | 50 | return nil 51 | } 52 | 53 | // Validate checks validation of GarbageCollectorController. 54 | func (o *GarbageCollectorControllerOptions) Validate() []error { 55 | if o == nil { 56 | return nil 57 | } 58 | 59 | errs := []error{} 60 | return errs 61 | } 62 | -------------------------------------------------------------------------------- /pkg/controllers/kubecontroller/options/namespacecontroller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package options 18 | 19 | import ( 20 | "github.com/spf13/pflag" 21 | 22 | namespaceconfig "k8s.io/kubernetes/pkg/controller/namespace/config" 23 | ) 24 | 25 | // NamespaceControllerOptions holds the NamespaceController options. 26 | type NamespaceControllerOptions struct { 27 | *namespaceconfig.NamespaceControllerConfiguration 28 | } 29 | 30 | // AddFlags adds flags related to NamespaceController for controller manager to the specified FlagSet. 31 | func (o *NamespaceControllerOptions) AddFlags(fs *pflag.FlagSet) { 32 | if o == nil { 33 | return 34 | } 35 | 36 | fs.DurationVar(&o.NamespaceSyncPeriod.Duration, "namespace-sync-period", o.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates") 37 | fs.Int32Var(&o.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", o.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load") 38 | } 39 | 40 | // ApplyTo fills up NamespaceController config with options. 41 | func (o *NamespaceControllerOptions) ApplyTo(cfg *namespaceconfig.NamespaceControllerConfiguration) error { 42 | if o == nil { 43 | return nil 44 | } 45 | 46 | cfg.NamespaceSyncPeriod = o.NamespaceSyncPeriod 47 | cfg.ConcurrentNamespaceSyncs = o.ConcurrentNamespaceSyncs 48 | 49 | return nil 50 | } 51 | 52 | // Validate checks validation of NamespaceControllerOptions. 
53 | func (o *NamespaceControllerOptions) Validate() []error { 54 | if o == nil { 55 | return nil 56 | } 57 | 58 | errs := []error{} 59 | return errs 60 | } 61 | -------------------------------------------------------------------------------- /pkg/controllers/kubecontroller/options/serviceaccountcontroller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2018 The Kubernetes Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // Copyright Contributors to the Open Cluster Management project 18 | package options 19 | 20 | import ( 21 | "github.com/spf13/pflag" 22 | 23 | serviceaccountconfig "k8s.io/kubernetes/pkg/controller/serviceaccount/config" 24 | ) 25 | 26 | // SAControllerOptions holds the ServiceAccountController options. 
27 | type SAControllerOptions struct { 28 | *serviceaccountconfig.SAControllerConfiguration 29 | } 30 | 31 | // AddFlags adds flags related to ServiceAccountController for controller manager to the specified FlagSet 32 | func (o *SAControllerOptions) AddFlags(fs *pflag.FlagSet) { 33 | if o == nil { 34 | return 35 | } 36 | 37 | fs.StringVar(&o.ServiceAccountKeyFile, "service-account-private-key-file", o.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.") 38 | fs.Int32Var(&o.ConcurrentSATokenSyncs, "concurrent-serviceaccount-token-syncs", o.ConcurrentSATokenSyncs, "The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load") 39 | fs.StringVar(&o.RootCAFile, "root-ca-file", o.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.") 40 | } 41 | 42 | // ApplyTo fills up ServiceAccountController config with options. 43 | func (o *SAControllerOptions) ApplyTo(cfg *serviceaccountconfig.SAControllerConfiguration) error { 44 | if o == nil { 45 | return nil 46 | } 47 | 48 | cfg.ServiceAccountKeyFile = o.ServiceAccountKeyFile 49 | cfg.ConcurrentSATokenSyncs = o.ConcurrentSATokenSyncs 50 | cfg.RootCAFile = o.RootCAFile 51 | 52 | return nil 53 | } 54 | 55 | // Validate checks validation of ServiceAccountControllerOptions. 
56 | func (o *SAControllerOptions) Validate() []error { 57 | if o == nil { 58 | return nil 59 | } 60 | 61 | errs := []error{} 62 | return errs 63 | } 64 | -------------------------------------------------------------------------------- /pkg/controllers/ocmcontroller/ocmcrd.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package ocmcontroller 3 | 4 | import ( 5 | apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" 6 | "k8s.io/klog/v2" 7 | aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" 8 | 9 | "open-cluster-management.io/multicluster-controlplane/pkg/controllers/bootstrap" 10 | "open-cluster-management.io/multicluster-controlplane/pkg/util" 11 | ) 12 | 13 | func InstallCRD(stopCh <-chan struct{}, aggregatorConfig *aggregatorapiserver.Config) error { 14 | klog.Info("installing ocm crds") 15 | apiextensionsClient, err := apiextensionsclient.NewForConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) 16 | if err != nil { 17 | return err 18 | } 19 | if err := bootstrap.InstallBaseCRDs(util.GoContext(stopCh), apiextensionsClient); err != nil { 20 | klog.Errorf("failed to bootstrap OCM CRDs: %v", err) 21 | // nolint:nilerr 22 | return nil // don't klog.Fatal. This only happens when context is cancelled. 
23 | } 24 | klog.Info("installed ocm crds") 25 | return nil 26 | } 27 | -------------------------------------------------------------------------------- /pkg/controllers/ocmcontroller/ocmhubresource.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package ocmcontroller 3 | 4 | import ( 5 | "k8s.io/client-go/kubernetes" 6 | "k8s.io/klog/v2" 7 | aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver" 8 | 9 | "open-cluster-management.io/multicluster-controlplane/pkg/controllers/bootstrap" 10 | "open-cluster-management.io/multicluster-controlplane/pkg/util" 11 | ) 12 | 13 | func InstallHubResource(stopCh <-chan struct{}, aggregatorConfig *aggregatorapiserver.Config) error { 14 | klog.Info("installing ocm hub resources") 15 | kubeClient, err := kubernetes.NewForConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig) 16 | if err != nil { 17 | return err 18 | } 19 | 20 | // bootstrap ocm hub resources 21 | if err := bootstrap.BuildKubeSystemResources( 22 | util.GoContext(stopCh), 23 | aggregatorConfig.GenericConfig.Config, 24 | kubeClient, 25 | ); err != nil { 26 | klog.Errorf("failed to bootstrap ocm hub controller resources: %v", err) 27 | // nolint:nilerr 28 | return nil // don't klog.Fatal. This only happens when context is cancelled. 
29 | } 30 | klog.Infof("installed ocm hub resources") 31 | return nil 32 | } 33 | -------------------------------------------------------------------------------- /pkg/etcd/etcd.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package etcd 3 | 4 | import ( 5 | "context" 6 | "crypto/tls" 7 | "fmt" 8 | "net/url" 9 | "os" 10 | "path/filepath" 11 | "strconv" 12 | "time" 13 | 14 | "go.etcd.io/etcd/server/v3/embed" 15 | "go.etcd.io/etcd/server/v3/wal" 16 | 17 | "k8s.io/klog/v2" 18 | ) 19 | 20 | type Server struct { 21 | Dir string 22 | } 23 | 24 | type ClientInfo struct { 25 | Endpoints []string 26 | TLS *tls.Config 27 | 28 | CertFile string 29 | KeyFile string 30 | TrustedCAFile string 31 | } 32 | 33 | func (s *Server) Run(ctx context.Context, peerPort, clientPort string, walSizeBytes int64) (ClientInfo, error) { 34 | klog.Info("Creating embedded etcd server") 35 | if walSizeBytes != 0 { 36 | wal.SegmentSizeBytes = walSizeBytes 37 | } 38 | cfg := embed.NewConfig() 39 | 40 | cfg.Logger = "zap" 41 | cfg.LogLevel = "warn" 42 | 43 | cfg.Dir = s.Dir 44 | cfg.AuthToken = "" 45 | 46 | cfg.ListenPeerUrls = []url.URL{{Scheme: "https", Host: "localhost:" + peerPort}} 47 | cfg.AdvertisePeerUrls = []url.URL{{Scheme: "https", Host: "localhost:" + peerPort}} 48 | cfg.ListenClientUrls = []url.URL{{Scheme: "https", Host: "localhost:" + clientPort}} 49 | cfg.AdvertiseClientUrls = []url.URL{{Scheme: "https", Host: "localhost:" + clientPort}} 50 | cfg.InitialCluster = cfg.InitialClusterFromName(cfg.Name) 51 | 52 | cfg.PeerTLSInfo.ServerName = "localhost" 53 | cfg.PeerTLSInfo.CertFile = filepath.Join(cfg.Dir, "cert", "etcd-ca", "peer", "peer.crt") 54 | cfg.PeerTLSInfo.KeyFile = filepath.Join(cfg.Dir, "cert", "etcd-ca", "peer", "peer.key") 55 | cfg.PeerTLSInfo.TrustedCAFile = filepath.Join(cfg.Dir, "cert", "etcd-ca", "ca.crt") 56 | cfg.PeerTLSInfo.ClientCertAuth = true 57 | 58 | 
cfg.ClientTLSInfo.ServerName = "localhost" 59 | cfg.ClientTLSInfo.CertFile = filepath.Join(cfg.Dir, "cert", "etcd-ca", "peer", "peer.crt") 60 | cfg.ClientTLSInfo.KeyFile = filepath.Join(cfg.Dir, "cert", "etcd-ca", "peer", "peer.key") 61 | cfg.ClientTLSInfo.TrustedCAFile = filepath.Join(cfg.Dir, "cert", "etcd-ca", "ca.crt") 62 | cfg.ClientTLSInfo.ClientCertAuth = true 63 | 64 | if enableUnsafeEtcdDisableFsyncHack, _ := strconv.ParseBool(os.Getenv("UNSAFE_E2E_HACK_DISABLE_ETCD_FSYNC")); enableUnsafeEtcdDisableFsyncHack { 65 | cfg.UnsafeNoFsync = true 66 | } 67 | 68 | e, err := embed.StartEtcd(cfg) 69 | if err != nil { 70 | return ClientInfo{}, err 71 | } 72 | // Shutdown when context is closed 73 | go func() { 74 | <-ctx.Done() 75 | e.Close() 76 | }() 77 | 78 | clientConfig, err := cfg.ClientTLSInfo.ClientConfig() 79 | if err != nil { 80 | return ClientInfo{}, err 81 | } 82 | 83 | select { 84 | case <-e.Server.ReadyNotify(): 85 | return ClientInfo{ 86 | Endpoints: []string{cfg.AdvertiseClientUrls[0].String()}, 87 | TLS: clientConfig, 88 | CertFile: cfg.ClientTLSInfo.CertFile, 89 | KeyFile: cfg.ClientTLSInfo.KeyFile, 90 | TrustedCAFile: cfg.ClientTLSInfo.TrustedCAFile, 91 | }, nil 92 | case <-time.After(60 * time.Second): 93 | e.Server.Stop() // trigger a shutdown 94 | return ClientInfo{}, fmt.Errorf("server took too long to start") 95 | case e := <-e.Err(): 96 | return ClientInfo{}, e 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /pkg/feature/feature.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package features 4 | 5 | import ( 6 | "k8s.io/component-base/featuregate" 7 | ) 8 | 9 | const ( 10 | // ManagedServiceAccount will start new controllers in the controlplane agent process to synchronize ServiceAccount to the managed clusters 11 | // and collecting the tokens from these local service accounts 
as secret resources back to the hub cluster. 12 | ManagedServiceAccount featuregate.Feature = "ManagedServiceAccount" 13 | 14 | // ManagedServiceAccountEphemeralIdentity allow user to set TTL on the ManagedServiceAccount resource via spec.ttlSecondsAfterCreation 15 | ManagedServiceAccountEphemeralIdentity featuregate.Feature = "ManagedServiceAccountEphemeralIdentity" 16 | ) 17 | 18 | var DefaultControlPlaneFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ 19 | ManagedServiceAccountEphemeralIdentity: {Default: false, PreRelease: featuregate.Alpha}, 20 | } 21 | 22 | var DefaultControlPlaneAgentFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ 23 | ManagedServiceAccount: {Default: false, PreRelease: featuregate.Alpha}, 24 | } 25 | -------------------------------------------------------------------------------- /pkg/servers/configs/configs.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package configs 3 | 4 | import ( 5 | "fmt" 6 | "os" 7 | "path" 8 | "path/filepath" 9 | 10 | "gopkg.in/yaml.v2" 11 | "k8s.io/klog/v2" 12 | "open-cluster-management.io/multicluster-controlplane/pkg/util" 13 | ) 14 | 15 | const DefaultAPIServerPort = 9443 16 | 17 | const ( 18 | defaultControlPlaneDataDir = "/.ocm" 19 | defaultControlPlaneCADir = "/.ocm/cert/controlplane-ca" 20 | defaultETCDMode = "embed" 21 | defaultETCDPrefix = "/registry" 22 | ) 23 | 24 | type ControlplaneRunConfig struct { 25 | DataDirectory string `yaml:"dataDirectory"` 26 | Apiserver ApiserverConfig `yaml:"apiserver"` 27 | Etcd EtcdConfig `yaml:"etcd"` 28 | Aggregator AggregatorConfig `yaml:"aggregator"` 29 | } 30 | 31 | type ApiserverConfig struct { 32 | ExternalHostname string `yaml:"externalHostname"` 33 | Port int `yaml:"port"` 34 | CAFile string `yaml:"caFile"` 35 | CAKeyFile string `yaml:"caKeyFile"` 36 | } 37 | 38 | type EtcdConfig struct { 39 | Mode string `yaml:"mode"` 40 | 
Servers []string `yaml:"servers"` 41 | CAFile string `yaml:"caFile"` 42 | CertFile string `yaml:"certFile"` 43 | KeyFile string `yaml:"keyFile"` 44 | Prefix string `yaml:"prefix"` 45 | } 46 | 47 | type AggregatorConfig struct { 48 | ProxyClientCertFile string `yaml:"proxyClientCertFile"` 49 | ProxyClientKeyFile string `yaml:"proxyClientKeyFile"` 50 | RequestHeaderClientCAFile string `yaml:"requestheaderClientCAFile"` 51 | RequestHeaderUsernameHeaders []string `yaml:"requestheaderUsernameHeaders"` 52 | RequestHeaderGroupHeaders []string `yaml:"requestheaderGroupHeaders"` 53 | RequestHeaderExtraHeaderPrefixes []string `yaml:"requestheaderExtraHeadersPrefix"` 54 | RequestHeaderAllowedNames []string `yaml:"requestheaderAllowedNames"` 55 | } 56 | 57 | func LoadConfig(configDir string) (*ControlplaneRunConfig, error) { 58 | configFile := path.Join(configDir, "ocmconfig.yaml") 59 | configFileData, err := os.ReadFile(configFile) 60 | if err != nil { 61 | return nil, err 62 | } 63 | 64 | c := &ControlplaneRunConfig{} 65 | if err := yaml.Unmarshal(configFileData, c); err != nil { 66 | return nil, err 67 | } 68 | 69 | if c.DataDirectory == "" { 70 | c.DataDirectory = defaultControlPlaneDataDir 71 | } 72 | 73 | if c.Etcd.Mode == "" { 74 | c.Etcd.Mode = defaultETCDMode 75 | } 76 | 77 | if c.Etcd.Prefix == "" { 78 | c.Etcd.Prefix = defaultETCDPrefix 79 | } 80 | 81 | if len(c.Etcd.Servers) == 0 { 82 | c.Etcd.Servers = []string{"http://127.0.0.1:2379"} 83 | } 84 | 85 | if c.Apiserver.ExternalHostname == "" { 86 | klog.Infof("The external host name unspecified, trying to find it from runtime environment ...") 87 | hostname, err := util.GetExternalHost() 88 | if err != nil { 89 | return nil, fmt.Errorf("failed to find external host name from runtime environment, %v", err) 90 | } 91 | c.Apiserver.ExternalHostname = hostname 92 | } 93 | 94 | if !c.IsCAProvided() { 95 | klog.Infof("The server ca unspecified, trying to find it from runtime environment ...") 96 | loaded, err := 
util.LoadServingSigner(defaultControlPlaneCADir) 97 | if err != nil { 98 | return nil, fmt.Errorf("failed to load server ca from runtime enviroment, %v", err) 99 | } 100 | 101 | if loaded { 102 | c.Apiserver.CAFile = filepath.Join(defaultControlPlaneCADir, "ca.crt") 103 | c.Apiserver.CAKeyFile = filepath.Join(defaultControlPlaneCADir, "ca.key") 104 | } 105 | } 106 | 107 | return c, nil 108 | } 109 | 110 | func (c *ControlplaneRunConfig) IsCAProvided() bool { 111 | return c.Apiserver.CAFile != "" && c.Apiserver.CAKeyFile != "" 112 | } 113 | 114 | func (c *ControlplaneRunConfig) IsEmbedEtcd() bool { 115 | return c.Etcd.Mode == "embed" 116 | } 117 | -------------------------------------------------------------------------------- /pkg/servers/options/admission.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | /* 4 | Copyright 2018 The Kubernetes Authors. 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 17 | */ 18 | 19 | package options 20 | 21 | import ( 22 | "github.com/spf13/pflag" 23 | 24 | "k8s.io/apiserver/pkg/admission" 25 | "k8s.io/apiserver/pkg/server" 26 | genericoptions "k8s.io/apiserver/pkg/server/options" 27 | "k8s.io/client-go/dynamic" 28 | "k8s.io/client-go/informers" 29 | "k8s.io/client-go/kubernetes" 30 | "k8s.io/component-base/featuregate" 31 | ) 32 | 33 | // AdmissionOptions holds the admission options. 
34 | // It is a wrap of generic AdmissionOptions. 35 | type AdmissionOptions struct { 36 | // GenericAdmission holds the generic admission options. 37 | GenericAdmission *genericoptions.AdmissionOptions 38 | } 39 | 40 | // NewAdmissionOptions creates a new instance of AdmissionOptions 41 | // Note: 42 | // 43 | // In addition it calls RegisterAllAdmissionPlugins to register 44 | // all kube-apiserver admission plugins. 45 | // 46 | // Provides the list of RecommendedPluginOrder that holds sane values 47 | // that can be used by servers that don't care about admission chain. 48 | // Servers that do care can overwrite/append that field after creation. 49 | func NewAdmissionOptions() *AdmissionOptions { 50 | options := genericoptions.NewAdmissionOptions() 51 | // register all admission plugins 52 | RegisterAllAdmissionPlugins(options.Plugins) 53 | // set RecommendedPluginOrder 54 | options.RecommendedPluginOrder = AllOrderedPlugins 55 | // set DefaultOffPlugins 56 | options.DefaultOffPlugins = DefaultOffAdmissionPlugins() 57 | 58 | return &AdmissionOptions{ 59 | GenericAdmission: options, 60 | } 61 | } 62 | 63 | // AddFlags adds flags related to admission for kube-apiserver to the specified FlagSet 64 | func (a *AdmissionOptions) AddFlags(fs *pflag.FlagSet) { 65 | a.GenericAdmission.AddFlags(fs) 66 | } 67 | 68 | // Validate verifies flags passed to kube-apiserver AdmissionOptions. 69 | // Kube-apiserver verifies PluginNames and then call generic AdmissionOptions.Validate. 70 | func (a *AdmissionOptions) Validate() []error { 71 | return a.GenericAdmission.Validate() 72 | } 73 | 74 | // ApplyTo adds the admission chain to the server configuration. 75 | // Kube-apiserver just call generic AdmissionOptions.ApplyTo. 
76 | func (a *AdmissionOptions) ApplyTo( 77 | c *server.Config, 78 | informers informers.SharedInformerFactory, 79 | kubeClient kubernetes.Interface, 80 | dynamicClient dynamic.Interface, 81 | features featuregate.FeatureGate, 82 | pluginInitializers ...admission.PluginInitializer, 83 | ) error { 84 | if a == nil { 85 | return nil 86 | } 87 | 88 | return a.GenericAdmission.ApplyTo(c, informers, kubeClient, dynamicClient, features, pluginInitializers...) 89 | } 90 | -------------------------------------------------------------------------------- /pkg/servers/options/embeddedetcd.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | /* 3 | Copyright 2022 The KCP Authors. 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 
16 | */ 17 | 18 | package options 19 | 20 | import ( 21 | "fmt" 22 | 23 | "github.com/spf13/pflag" 24 | ) 25 | 26 | type EmbeddedEtcd struct { 27 | Enabled bool 28 | 29 | Directory string 30 | PeerPort string 31 | ClientPort string 32 | WalSizeBytes int64 33 | } 34 | 35 | func NewEmbeddedEtcd() *EmbeddedEtcd { 36 | return &EmbeddedEtcd{ 37 | PeerPort: "2380", 38 | ClientPort: "2379", 39 | } 40 | } 41 | 42 | func (e *EmbeddedEtcd) AddFlags(fs *pflag.FlagSet) { 43 | fs.BoolVar(&e.Enabled, "enable-embedded-etcd", false, "will use embedded etcd, if set to true") 44 | fs.StringVar(&e.Directory, "embedded-etcd-directory", e.Directory, "Directory for embedded etcd") 45 | fs.StringVar(&e.PeerPort, "embedded-etcd-peer-port", e.PeerPort, "Port for embedded etcd peer") 46 | fs.StringVar(&e.ClientPort, "embedded-etcd-client-port", e.ClientPort, "Port for embedded etcd client") 47 | fs.Int64Var(&e.WalSizeBytes, "embedded-etcd-wal-size-bytes", e.WalSizeBytes, "Size of embedded etcd WAL") 48 | } 49 | 50 | func (e *EmbeddedEtcd) Validate() []error { 51 | var errs []error 52 | 53 | if e.Enabled { 54 | if e.PeerPort == "" { 55 | errs = append(errs, fmt.Errorf("--embedded-etcd-peer-port must be specified")) 56 | } 57 | if e.ClientPort == "" { 58 | errs = append(errs, fmt.Errorf("--embedded-etcd-client-port must be specified")) 59 | } 60 | } 61 | 62 | return errs 63 | } 64 | -------------------------------------------------------------------------------- /pkg/servers/simplerestoptionsfactroy.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package servers 4 | 5 | // refer to https://github.com/kubernetes/apiserver/blob/v0.24.11/pkg/server/options/etcd.go#L243 6 | 7 | import ( 8 | "fmt" 9 | "strconv" 10 | "strings" 11 | 12 | "k8s.io/apimachinery/pkg/runtime" 13 | "k8s.io/apimachinery/pkg/runtime/schema" 14 | "k8s.io/apiserver/pkg/registry/generic" 15 | genericregistry 
"k8s.io/apiserver/pkg/registry/generic/registry" 16 | genericoptions "k8s.io/apiserver/pkg/server/options" 17 | "k8s.io/apiserver/pkg/storage/value" 18 | "k8s.io/klog/v2" 19 | ) 20 | 21 | type SimpleRestOptionsFactory struct { 22 | Options genericoptions.EtcdOptions 23 | TransformerOverrides map[schema.GroupResource]value.Transformer 24 | } 25 | 26 | func (f *SimpleRestOptionsFactory) GetRESTOptions(resource schema.GroupResource, example runtime.Object) (generic.RESTOptions, error) { 27 | ret := generic.RESTOptions{ 28 | StorageConfig: f.Options.StorageConfig.ForResource(resource), 29 | Decorator: generic.UndecoratedStorage, 30 | EnableGarbageCollection: f.Options.EnableGarbageCollection, 31 | DeleteCollectionWorkers: f.Options.DeleteCollectionWorkers, 32 | ResourcePrefix: resource.Group + "/" + resource.Resource, 33 | CountMetricPollPeriod: f.Options.StorageConfig.CountMetricPollPeriod, 34 | StorageObjectCountTracker: f.Options.StorageConfig.StorageObjectCountTracker, 35 | } 36 | if f.TransformerOverrides != nil { 37 | if transformer, ok := f.TransformerOverrides[resource]; ok { 38 | ret.StorageConfig.Transformer = transformer 39 | } 40 | } 41 | if f.Options.EnableWatchCache { 42 | sizes, err := ParseWatchCacheSizes(f.Options.WatchCacheSizes) 43 | if err != nil { 44 | return generic.RESTOptions{}, err 45 | } 46 | size, ok := sizes[resource] 47 | if ok && size > 0 { 48 | klog.Warningf("Dropping watch-cache-size for %v - watchCache size is now dynamic", resource) 49 | } 50 | if ok && size <= 0 { 51 | klog.V(3).Info("Not using watch cache", "resource", resource) 52 | ret.Decorator = generic.UndecoratedStorage 53 | } else { 54 | klog.V(3).Info("Using watch cache", "resource", resource) 55 | ret.Decorator = genericregistry.StorageWithCacher() 56 | } 57 | } 58 | return ret, nil 59 | } 60 | 61 | // ParseWatchCacheSizes turns a list of cache size values into a map of group resources 62 | // to requested sizes. 
63 | func ParseWatchCacheSizes(cacheSizes []string) (map[schema.GroupResource]int, error) { 64 | watchCacheSizes := make(map[schema.GroupResource]int) 65 | for _, c := range cacheSizes { 66 | tokens := strings.Split(c, "#") 67 | if len(tokens) != 2 { 68 | return nil, fmt.Errorf("invalid value of watch cache size: %s", c) 69 | } 70 | 71 | size, err := strconv.Atoi(tokens[1]) 72 | if err != nil { 73 | return nil, fmt.Errorf("invalid size of watch cache size: %s", c) 74 | } 75 | if size < 0 { 76 | return nil, fmt.Errorf("watch cache size cannot be negative: %s", c) 77 | } 78 | watchCacheSizes[schema.ParseGroupResource(tokens[0])] = size 79 | } 80 | return watchCacheSizes, nil 81 | } 82 | -------------------------------------------------------------------------------- /pkg/util/recorder.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package util 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | 8 | "github.com/openshift/library-go/pkg/operator/events" 9 | "k8s.io/klog/v2" 10 | ) 11 | 12 | type loggingRecorder struct { 13 | componentName string 14 | } 15 | 16 | var _ events.Recorder = &loggingRecorder{} 17 | 18 | func NewLoggingRecorder(componentName string) events.Recorder { 19 | return &loggingRecorder{componentName: componentName} 20 | } 21 | 22 | func (r *loggingRecorder) Event(reason, message string) { 23 | klog.Infof("component=%s, reason=%s, msg=%s", r.componentName, reason, message) 24 | } 25 | 26 | func (r *loggingRecorder) Eventf(reason, messageFmt string, args ...interface{}) { 27 | r.Event(reason, fmt.Sprintf(messageFmt, args...)) 28 | } 29 | 30 | func (r *loggingRecorder) Warning(reason, message string) { 31 | klog.Warningf("component=%s, reason=%s, msg=%s", r.componentName, reason, message) 32 | } 33 | 34 | func (r *loggingRecorder) Warningf(reason, messageFmt string, args ...interface{}) { 35 | r.Warning(reason, fmt.Sprintf(messageFmt, args...)) 36 | 
} 37 | 38 | func (r *loggingRecorder) ForComponent(componentName string) events.Recorder { 39 | newRecorder := *r 40 | newRecorder.componentName = componentName 41 | return &newRecorder 42 | } 43 | 44 | func (r *loggingRecorder) WithComponentSuffix(componentNameSuffix string) events.Recorder { 45 | return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), componentNameSuffix)) 46 | } 47 | 48 | func (r *loggingRecorder) WithContext(ctx context.Context) events.Recorder { 49 | return r 50 | } 51 | 52 | func (r *loggingRecorder) ComponentName() string { 53 | return r.componentName 54 | } 55 | 56 | func (r *loggingRecorder) Shutdown() {} 57 | -------------------------------------------------------------------------------- /plugin/admission/managedclustermutating/admission.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package managedclustermutating 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "io" 8 | 9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/apimachinery/pkg/types" 12 | "k8s.io/apimachinery/pkg/util/uuid" 13 | "k8s.io/apiserver/pkg/admission" 14 | "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" 15 | "k8s.io/apiserver/pkg/admission/plugin/webhook/request" 16 | clusterv1api "open-cluster-management.io/api/cluster/v1" 17 | clusterwebhookv1 "open-cluster-management.io/ocm/pkg/registration/webhook/v1" 18 | runtimeadmission "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 19 | 20 | admissionutil "open-cluster-management.io/multicluster-controlplane/plugin/admission/util" 21 | ) 22 | 23 | const PluginName = "ManagedClusterMutating" 24 | 25 | func Register(plugins *admission.Plugins) { 26 | plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { 27 | return NewPlugin(), nil 28 | }) 29 | } 30 | 31 | type Plugin struct { 32 | *admission.Handler 33 | webhook 
*clusterwebhookv1.ManagedClusterWebhook 34 | } 35 | 36 | func (p *Plugin) ValidateInitialization() error { 37 | return nil 38 | } 39 | 40 | var _ admission.MutationInterface = &Plugin{} 41 | var _ admission.InitializationValidator = &Plugin{} 42 | 43 | func NewPlugin() *Plugin { 44 | return &Plugin{ 45 | Handler: admission.NewHandler(admission.Create, admission.Update), 46 | webhook: &clusterwebhookv1.ManagedClusterWebhook{}, 47 | } 48 | } 49 | 50 | func (p *Plugin) Admit(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { 51 | v := admission.VersionedAttributes{ 52 | Attributes: a, 53 | VersionedOldObject: a.GetOldObject(), 54 | VersionedObject: a.GetObject(), 55 | VersionedKind: a.GetKind(), 56 | } 57 | 58 | gvr := clusterv1api.GroupVersion.WithResource("managedclusters") 59 | gvk := clusterv1api.GroupVersion.WithKind("ManagedCluster") 60 | 61 | // resource is not mcl 62 | if a.GetKind() != gvk { 63 | return nil 64 | } 65 | 66 | // we just need gvr in code logic 67 | i := generic.WebhookInvocation{ 68 | Kind: gvk, 69 | Resource: gvr, 70 | } 71 | 72 | uid := types.UID(uuid.NewUUID()) 73 | ar := request.CreateV1AdmissionReview(uid, &v, &i) 74 | 75 | old := a.GetOldObject() 76 | oldRaw := runtime.RawExtension{} 77 | err := admissionutil.Convert_runtime_Object_To_runtime_RawExtension_Raw(&old, &oldRaw) 78 | if err != nil { 79 | return fmt.Errorf("error occured in ManagedClusterMutating: failed to convert Object to RawExtension") 80 | } 81 | ar.Request.OldObject = oldRaw 82 | 83 | cluster := &clusterv1api.ManagedCluster{} 84 | obj := a.GetObject().(*unstructured.Unstructured) 85 | err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, cluster) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | r := runtimeadmission.Request{AdmissionRequest: *ar.Request} 91 | admissionContext := runtimeadmission.NewContextWithRequest(ctx, r) 92 | if err := p.webhook.Default(admissionContext, cluster); err != nil { 93 | return err 94 | 
} 95 | 96 | updated, err := runtime.DefaultUnstructuredConverter.ToUnstructured(cluster) 97 | if err != nil { 98 | return err 99 | } 100 | obj.Object = updated 101 | 102 | return nil 103 | } 104 | -------------------------------------------------------------------------------- /plugin/admission/managedclustersetbindingvalidating/admission.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package managedclustersetbindingvalidating 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "io" 8 | 9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/apimachinery/pkg/types" 12 | "k8s.io/apimachinery/pkg/util/uuid" 13 | "k8s.io/apiserver/pkg/admission" 14 | genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" 15 | "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" 16 | "k8s.io/apiserver/pkg/admission/plugin/webhook/request" 17 | "k8s.io/client-go/kubernetes" 18 | clusterv1beta2api "open-cluster-management.io/api/cluster/v1beta2" 19 | webhookv1beta2 "open-cluster-management.io/ocm/pkg/registration/webhook/v1beta2" 20 | runtimeadmission "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 21 | ) 22 | 23 | const PluginName = "ManagedClusterSetBindingValidating" 24 | 25 | func Register(plugins *admission.Plugins) { 26 | plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { 27 | return NewPlugin(), nil 28 | }) 29 | } 30 | 31 | type Plugin struct { 32 | *admission.Handler 33 | webhook *webhookv1beta2.ManagedClusterSetBindingWebhook 34 | } 35 | 36 | func (p *Plugin) SetExternalKubeClientSet(client kubernetes.Interface) { 37 | p.webhook.SetExternalKubeClientSet(client) 38 | } 39 | 40 | func (p *Plugin) ValidateInitialization() error { 41 | if p.webhook == nil { 42 | return fmt.Errorf("missing admission") 43 | } 44 | return nil 45 | } 46 | 47 | var _ 
admission.ValidationInterface = &Plugin{} 48 | var _ admission.InitializationValidator = &Plugin{} 49 | var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&Plugin{}) 50 | 51 | func NewPlugin() *Plugin { 52 | return &Plugin{ 53 | Handler: admission.NewHandler(admission.Create, admission.Update), 54 | webhook: &webhookv1beta2.ManagedClusterSetBindingWebhook{}, 55 | } 56 | } 57 | 58 | func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { 59 | v := admission.VersionedAttributes{ 60 | Attributes: a, 61 | VersionedOldObject: a.GetOldObject(), 62 | VersionedObject: a.GetObject(), 63 | VersionedKind: a.GetKind(), 64 | } 65 | 66 | gvr := clusterv1beta2api.GroupVersion.WithResource("managedclustersetbindings") 67 | gvk := clusterv1beta2api.GroupVersion.WithKind("ManagedClusterSetBinding") 68 | 69 | // resource is not mcl 70 | if a.GetKind() != gvk { 71 | return nil 72 | } 73 | 74 | // don't set kind cause do not use it in code logical 75 | i := generic.WebhookInvocation{ 76 | Resource: gvr, 77 | Kind: gvk, 78 | } 79 | 80 | uid := types.UID(uuid.NewUUID()) 81 | ar := request.CreateV1AdmissionReview(uid, &v, &i) 82 | 83 | binding := &clusterv1beta2api.ManagedClusterSetBinding{} 84 | obj := a.GetObject().(*unstructured.Unstructured) 85 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, binding) 86 | if err != nil { 87 | return err 88 | } 89 | 90 | r := runtimeadmission.Request{AdmissionRequest: *ar.Request} 91 | admissionContext := runtimeadmission.NewContextWithRequest(ctx, r) 92 | switch a.GetOperation() { 93 | case admission.Create: 94 | _, err := p.webhook.ValidateCreate(admissionContext, binding) 95 | return err 96 | case admission.Update: 97 | oldBinding := &clusterv1beta2api.ManagedClusterSetBinding{} 98 | oldObj := a.GetOldObject().(*unstructured.Unstructured) 99 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(oldObj.Object, oldBinding) 100 | if err != nil { 101 | 
return err 102 | } 103 | _, err = p.webhook.ValidateUpdate(admissionContext, oldBinding, binding) 104 | return err 105 | } 106 | 107 | return nil 108 | } 109 | -------------------------------------------------------------------------------- /plugin/admission/managedclustervalidating/admission.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package managedclustervalidating 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "io" 8 | 9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/apimachinery/pkg/types" 12 | "k8s.io/apimachinery/pkg/util/uuid" 13 | "k8s.io/apiserver/pkg/admission" 14 | genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" 15 | "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" 16 | "k8s.io/apiserver/pkg/admission/plugin/webhook/request" 17 | "k8s.io/client-go/kubernetes" 18 | clusterv1api "open-cluster-management.io/api/cluster/v1" 19 | clusterwebhookv1 "open-cluster-management.io/ocm/pkg/registration/webhook/v1" 20 | runtimeadmission "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 21 | ) 22 | 23 | const PluginName = "ManagedClusterValidating" 24 | 25 | func Register(plugins *admission.Plugins) { 26 | plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { 27 | return NewPlugin(), nil 28 | }) 29 | } 30 | 31 | type Plugin struct { 32 | *admission.Handler 33 | webhook *clusterwebhookv1.ManagedClusterWebhook 34 | } 35 | 36 | func (p *Plugin) SetExternalKubeClientSet(client kubernetes.Interface) { 37 | p.webhook.SetExternalKubeClientSet(client) 38 | } 39 | 40 | func (p *Plugin) ValidateInitialization() error { 41 | if p.webhook == nil { 42 | return fmt.Errorf("missing webhook") 43 | } 44 | return nil 45 | } 46 | 47 | var _ admission.ValidationInterface = &Plugin{} 48 | var _ admission.InitializationValidator = &Plugin{} 49 | var _ = 
genericadmissioninitializer.WantsExternalKubeClientSet(&Plugin{}) 50 | 51 | func NewPlugin() *Plugin { 52 | return &Plugin{ 53 | Handler: admission.NewHandler(admission.Create, admission.Update), 54 | webhook: &clusterwebhookv1.ManagedClusterWebhook{}, 55 | } 56 | } 57 | 58 | func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { 59 | v := admission.VersionedAttributes{ 60 | Attributes: a, 61 | VersionedOldObject: a.GetOldObject(), 62 | VersionedObject: a.GetObject(), 63 | VersionedKind: a.GetKind(), 64 | } 65 | 66 | gvr := clusterv1api.GroupVersion.WithResource("managedclusters") 67 | gvk := clusterv1api.GroupVersion.WithKind("ManagedCluster") 68 | 69 | // resource is not mcl 70 | if a.GetKind() != gvk { 71 | return nil 72 | } 73 | 74 | // don't set kind cause do not use it in code logical 75 | i := generic.WebhookInvocation{ 76 | Resource: gvr, 77 | Kind: gvk, 78 | } 79 | 80 | uid := types.UID(uuid.NewUUID()) 81 | ar := request.CreateV1AdmissionReview(uid, &v, &i) 82 | 83 | r := runtimeadmission.Request{AdmissionRequest: *ar.Request} 84 | admissionContext := runtimeadmission.NewContextWithRequest(ctx, r) 85 | 86 | cluster := &clusterv1api.ManagedCluster{} 87 | obj := a.GetObject().(*unstructured.Unstructured) 88 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, cluster) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | switch a.GetOperation() { 94 | case admission.Create: 95 | _, err := p.webhook.ValidateCreate(admissionContext, cluster) 96 | return err 97 | case admission.Update: 98 | oldCluster := &clusterv1api.ManagedCluster{} 99 | oldObj := a.GetOldObject().(*unstructured.Unstructured) 100 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(oldObj.Object, oldCluster) 101 | if err != nil { 102 | return err 103 | } 104 | _, err = p.webhook.ValidateUpdate(admissionContext, oldCluster, cluster) 105 | return err 106 | } 107 | 108 | return nil 109 | } 110 | 
-------------------------------------------------------------------------------- /plugin/admission/manifestworkvalidating/admission.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package manifestworkvalidating 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "io" 8 | 9 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | "k8s.io/apimachinery/pkg/types" 12 | "k8s.io/apimachinery/pkg/util/uuid" 13 | "k8s.io/apiserver/pkg/admission" 14 | genericadmissioninitializer "k8s.io/apiserver/pkg/admission/initializer" 15 | "k8s.io/apiserver/pkg/admission/plugin/webhook/generic" 16 | "k8s.io/apiserver/pkg/admission/plugin/webhook/request" 17 | "k8s.io/client-go/kubernetes" 18 | workv1 "open-cluster-management.io/api/work/v1" 19 | workwebhookv1 "open-cluster-management.io/ocm/pkg/work/webhook/v1" 20 | runtimeadmission "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 21 | ) 22 | 23 | const PluginName = "ManifestWorkValidating" 24 | 25 | func Register(plugins *admission.Plugins) { 26 | plugins.Register(PluginName, func(config io.Reader) (admission.Interface, error) { 27 | return NewPlugin(), nil 28 | }) 29 | } 30 | 31 | type Plugin struct { 32 | *admission.Handler 33 | webhook *workwebhookv1.ManifestWorkWebhook 34 | } 35 | 36 | func (p *Plugin) SetExternalKubeClientSet(client kubernetes.Interface) { 37 | p.webhook.SetExternalKubeClientSet(client) 38 | } 39 | 40 | func (p *Plugin) ValidateInitialization() error { 41 | if p.webhook == nil { 42 | return fmt.Errorf("missing webhook") 43 | } 44 | return nil 45 | } 46 | 47 | var _ admission.ValidationInterface = &Plugin{} 48 | var _ admission.InitializationValidator = &Plugin{} 49 | var _ = genericadmissioninitializer.WantsExternalKubeClientSet(&Plugin{}) 50 | 51 | func NewPlugin() *Plugin { 52 | return &Plugin{ 53 | Handler: admission.NewHandler(admission.Create, 
admission.Update), 54 | webhook: &workwebhookv1.ManifestWorkWebhook{}, 55 | } 56 | } 57 | 58 | func (p *Plugin) Validate(ctx context.Context, a admission.Attributes, o admission.ObjectInterfaces) error { 59 | v := admission.VersionedAttributes{ 60 | Attributes: a, 61 | VersionedOldObject: a.GetOldObject(), 62 | VersionedObject: a.GetObject(), 63 | VersionedKind: a.GetKind(), 64 | } 65 | 66 | gvr := workv1.GroupVersion.WithResource("manifestworks") 67 | gvk := workv1.GroupVersion.WithKind("ManifestWork") 68 | 69 | // resource is not work 70 | if a.GetKind() != gvk { 71 | return nil 72 | } 73 | 74 | // don't set kind cause do not use it in code logical 75 | i := generic.WebhookInvocation{ 76 | Resource: gvr, 77 | Kind: gvk, 78 | } 79 | 80 | uid := types.UID(uuid.NewUUID()) 81 | ar := request.CreateV1AdmissionReview(uid, &v, &i) 82 | 83 | r := runtimeadmission.Request{AdmissionRequest: *ar.Request} 84 | admissionContext := runtimeadmission.NewContextWithRequest(ctx, r) 85 | 86 | work := &workv1.ManifestWork{} 87 | obj := a.GetObject().(*unstructured.Unstructured) 88 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.Object, work) 89 | if err != nil { 90 | return err 91 | } 92 | 93 | switch a.GetOperation() { 94 | case admission.Create: 95 | _, err := p.webhook.ValidateCreate(admissionContext, work) 96 | return err 97 | case admission.Update: 98 | oldWork := &workv1.ManifestWork{} 99 | oldObj := a.GetOldObject().(*unstructured.Unstructured) 100 | err := runtime.DefaultUnstructuredConverter.FromUnstructured(oldObj.Object, oldWork) 101 | if err != nil { 102 | return err 103 | } 104 | _, err = p.webhook.ValidateUpdate(admissionContext, oldWork, work) 105 | return err 106 | } 107 | 108 | return nil 109 | } 110 | -------------------------------------------------------------------------------- /plugin/admission/util/util.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster 
Management project 2 | package util 3 | 4 | import ( 5 | "encoding/json" 6 | 7 | "k8s.io/apimachinery/pkg/runtime" 8 | ) 9 | 10 | func Convert_runtime_Object_To_runtime_RawExtension_Raw(in *runtime.Object, out *runtime.RawExtension) error { 11 | if in == nil { 12 | out.Raw = []byte("null") 13 | return nil 14 | } 15 | raw, err := json.Marshal(*in) 16 | if err != nil { 17 | return err 18 | } 19 | 20 | out.Raw = raw 21 | return nil 22 | } 23 | -------------------------------------------------------------------------------- /test/bin/util.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../.." ; pwd -P)" 3 | 4 | function wait_command() { 5 | local command="$1"; shift 6 | local wait_seconds="${1:-90}"; shift # 90 seconds as default timeout 7 | 8 | until [[ $((wait_seconds--)) -eq 0 ]] || eval "$command &> /dev/null" ; do sleep 1; done 9 | 10 | ((++wait_seconds)) 11 | } 12 | 13 | function wait_for_url() { 14 | local url="$1"; shift 15 | local times="${1:-90}"; shift # 90 seconds as default timeout 16 | 17 | command -v curl >/dev/null || { 18 | echo "curl must be installed" 19 | exit 1 20 | } 21 | 22 | local i 23 | for i in $(seq 1 "${times}"); do 24 | local out 25 | if out=$(curl -gkfs "${url}" 2>/dev/null); then 26 | echo "On try ${i}, ${url}: ${out}" 27 | return 0 28 | fi 29 | sleep 1 30 | done 31 | 32 | echo "Timed out waiting for ${url}; tried ${times}" 33 | exit 1 34 | } 35 | 36 | function ensure_clusteradm() { 37 | bin_dir="${REPO_DIR}/_output/bin" 38 | mkdir -p ${bin_dir} 39 | pushd ${bin_dir} 40 | curl -LO https://raw.githubusercontent.com/open-cluster-management-io/clusteradm/main/install.sh 41 | chmod +x ./install.sh 42 | export INSTALL_DIR=$bin_dir 43 | ./install.sh 0.9.0 44 | unset INSTALL_DIR 45 | popd 46 | } 47 | -------------------------------------------------------------------------------- /test/e2e/hack/cleanup.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." ; pwd -P)" 4 | 5 | cluster="e2e-test" 6 | 7 | rm -rf ${REPO_DIR}/_output 8 | kind delete clusters $cluster 9 | -------------------------------------------------------------------------------- /test/e2e/hack/e2e.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | KIND=${KIND:-"kind"} 4 | KUBECTL=${KUBECTL:-"kubectl"} 5 | HELM=${HELM:-"helm"} 6 | KUSTOMIZE=${KUSTOMIZE:-"kustomize"} 7 | 8 | if ! command -v ${KIND} >/dev/null 2>&1; then 9 | echo "ERROR: command ${KIND} is not found" 10 | exit 1 11 | fi 12 | 13 | if ! command -v ${KUBECTL} >/dev/null 2>&1; then 14 | echo "ERROR: command ${KUBECTL} is not found" 15 | exit 1 16 | fi 17 | 18 | if ! command -v ${HELM} >/dev/null 2>&1; then 19 | echo "ERROR: command ${HELM} is not found" 20 | exit 1 21 | fi 22 | 23 | if ! command -v ${KUSTOMIZE} >/dev/null 2>&1; then 24 | echo "ERROR: command ${KUSTOMIZE} is not found" 25 | exit 1 26 | fi 27 | 28 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." ; pwd -P)" 29 | IMAGE_NAME=${IMAGE_NAME:-quay.io/open-cluster-management/multicluster-controlplane:latest} 30 | 31 | source "${REPO_DIR}/test/bin/util.sh" 32 | 33 | output="${REPO_DIR}/_output" 34 | cluster_dir="${output}/kubeconfig" 35 | agent_deploy_dir="${output}/agent/deploy" 36 | 37 | mkdir -p ${cluster_dir} 38 | mkdir -p ${agent_deploy_dir} 39 | 40 | cluster="e2e-test" 41 | external_host_port="30443" 42 | kubeconfig="${cluster_dir}/${cluster}.kubeconfig" 43 | ${KIND} create cluster --kubeconfig $kubeconfig --name ${cluster} 44 | external_host_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' ${cluster}-control-plane) 45 | 46 | echo "Load $IMAGE_NAME to the cluster $cluster ..." 
47 | ${KIND} load docker-image $IMAGE_NAME --name $cluster 48 | 49 | echo "Deploy etcd in the cluster $cluster ..." 50 | pushd ${REPO_DIR} 51 | export KUBECONFIG=${kubeconfig} 52 | STORAGE_CLASS_NAME="standard" make deploy-etcd 53 | unset KUBECONFIG 54 | popd 55 | 56 | namespace=multicluster-controlplane 57 | echo "Deploy standalone controlplane in namespace $namespace ..." 58 | 59 | ${KUBECTL} --kubeconfig ${kubeconfig} delete ns $namespace --ignore-not-found 60 | ${KUBECTL} --kubeconfig ${kubeconfig} create ns $namespace 61 | 62 | pushd ${REPO_DIR} 63 | export HUB_NAME=${namespace} 64 | export EXTERNAL_HOSTNAME=${external_host_ip} 65 | export NODE_PORT="${external_host_port}" 66 | export SELF_MANAGEMENT=true 67 | export KUBECONFIG=${kubeconfig} 68 | export FEATURE_GATES="DefaultClusterSet=true\,ManagedClusterAutoApproval=true\,ManagedServiceAccountEphemeralIdentity=true" 69 | make deploy 70 | unset KUBECONFIG 71 | unset HUB_NAME 72 | unset EXTERNAL_HOSTNAME 73 | unset NODE_PORT 74 | unset SELF_MANAGEMENT 75 | unset FEATURE_GATES 76 | popd 77 | 78 | wait_command "${KUBECTL} --kubeconfig $kubeconfig -n multicluster-controlplane get secrets multicluster-controlplane-kubeconfig" 79 | ${KUBECTL} --kubeconfig $kubeconfig -n multicluster-controlplane -n multicluster-controlplane logs -l app=multicluster-controlplane --tail=-1 80 | 81 | hubkubeconfig="${cluster_dir}/controlplane.kubeconfig" 82 | ${KUBECTL} --kubeconfig $kubeconfig -n multicluster-controlplane get secrets multicluster-controlplane-kubeconfig -ojsonpath='{.data.kubeconfig}' | base64 -d > ${hubkubeconfig} 83 | 84 | 85 | # wait the controlplane is ready 86 | wait_for_url "https://${external_host_ip}:${external_host_port}/readyz" 87 | 88 | 89 | echo "Deploy standalone controlplane agents ..." 
90 | cp -r ${REPO_DIR}/hack/deploy/agent/* $agent_deploy_dir 91 | 92 | agent_namespace="multicluster-controlplane-agent" 93 | ${KUBECTL} --kubeconfig ${kubeconfig} delete ns ${agent_namespace} --ignore-not-found 94 | ${KUBECTL} --kubeconfig ${kubeconfig} create ns ${agent_namespace} 95 | 96 | kubectl --kubeconfig $kubeconfig -n multicluster-controlplane get secrets multicluster-controlplane-svc-kubeconfig -ojsonpath='{.data.kubeconfig}' | base64 -d > ${agent_deploy_dir}/hub-kubeconfig 97 | 98 | pushd $agent_deploy_dir 99 | ${KUSTOMIZE} edit set image quay.io/open-cluster-management/multicluster-controlplane=${IMAGE_NAME} 100 | popd 101 | ${KUSTOMIZE} build ${agent_deploy_dir} | ${KUBECTL} --kubeconfig $kubeconfig -n ${agent_namespace} apply -f - 102 | 103 | export HUBKUBECONFIG=${hubkubeconfig} 104 | export SPOKEKUBECONFIG=${kubeconfig} 105 | ${output}/e2e.test -test.v -ginkgo.v 106 | -------------------------------------------------------------------------------- /test/e2e/managedserviceaccount_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package e2e_test 3 | 4 | import ( 5 | "context" 6 | "time" 7 | 8 | . "github.com/onsi/ginkgo/v2" 9 | . 
"github.com/onsi/gomega" 10 | 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 13 | "k8s.io/apimachinery/pkg/runtime/schema" 14 | ) 15 | 16 | var _ = Describe("managed service account tests", func() { 17 | Context("create a simple managed service account", func() { 18 | It("should be able to create a managed service account", func() { 19 | obj := &unstructured.Unstructured{ 20 | Object: map[string]interface{}{ 21 | "apiVersion": "authentication.open-cluster-management.io/v1beta1", 22 | "kind": "ManagedServiceAccount", 23 | "metadata": map[string]interface{}{ 24 | "name": "my-sample", 25 | "namespace": loopbackClusterName, 26 | }, 27 | "spec": map[string]interface{}{ 28 | "rotation": map[string]interface{}{}, 29 | }, 30 | }, 31 | } 32 | resource := schema.GroupVersionResource{ 33 | Group: "authentication.open-cluster-management.io", 34 | Version: "v1beta1", 35 | Resource: "managedserviceaccounts", 36 | } 37 | Eventually(func() error { 38 | _, err := hubDynamicClient.Resource(resource).Namespace(loopbackClusterName). 
39 | Create(context.TODO(), obj, metav1.CreateOptions{}) 40 | return err 41 | }, 2*time.Minute, 5*time.Second).ShouldNot(HaveOccurred()) 42 | 43 | Eventually(func() error { 44 | _, err := hubKubeClient.CoreV1().Secrets(loopbackClusterName).Get(context.TODO(), "my-sample", metav1.GetOptions{}) 45 | return err 46 | }, 1*time.Minute, 1*time.Second).ShouldNot(HaveOccurred()) 47 | }) 48 | }) 49 | }) 50 | -------------------------------------------------------------------------------- /test/e2e/suite_test.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package e2e_test 4 | 5 | import ( 6 | "os" 7 | "testing" 8 | "time" 9 | 10 | ginkgo "github.com/onsi/ginkgo/v2" 11 | gomega "github.com/onsi/gomega" 12 | 13 | corev1 "k8s.io/api/core/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/runtime" 16 | "k8s.io/client-go/dynamic" 17 | "k8s.io/client-go/kubernetes" 18 | "k8s.io/client-go/tools/clientcmd" 19 | 20 | clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" 21 | workclient "open-cluster-management.io/api/client/work/clientset/versioned" 22 | workv1 "open-cluster-management.io/api/work/v1" 23 | ) 24 | 25 | const defaultNamespace = "default" 26 | 27 | const timeout = 30 * time.Second 28 | 29 | var ( 30 | hubKubeClient kubernetes.Interface 31 | spokeKubeClient kubernetes.Interface 32 | hubClusterClient clusterclient.Interface 33 | hubWorkClient workclient.Interface 34 | hubDynamicClient dynamic.Interface 35 | ) 36 | 37 | func TestE2E(t *testing.T) { 38 | gomega.RegisterFailHandler(ginkgo.Fail) 39 | ginkgo.RunSpecs(t, "E2E Suite") 40 | } 41 | 42 | var _ = ginkgo.BeforeSuite(func() { 43 | err := func() error { 44 | var err error 45 | 46 | hubKubeconfig := os.Getenv("HUBKUBECONFIG") 47 | 48 | hubConfig, err := clientcmd.BuildConfigFromFlags("", hubKubeconfig) 49 | if err != nil { 50 | return err 51 | } 
52 | 53 | hubKubeClient, err = kubernetes.NewForConfig(hubConfig) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | hubClusterClient, err = clusterclient.NewForConfig(hubConfig) 59 | if err != nil { 60 | return err 61 | } 62 | 63 | hubWorkClient, err = workclient.NewForConfig(hubConfig) 64 | if err != nil { 65 | return err 66 | } 67 | 68 | spokeKubeconfig := os.Getenv("SPOKEKUBECONFIG") 69 | spokeConfig, err := clientcmd.BuildConfigFromFlags("", spokeKubeconfig) 70 | if err != nil { 71 | return err 72 | } 73 | 74 | spokeKubeClient, err = kubernetes.NewForConfig(spokeConfig) 75 | if err != nil { 76 | return err 77 | } 78 | 79 | hubDynamicClient, err = dynamic.NewForConfig(hubConfig) 80 | if err != nil { 81 | return err 82 | } 83 | 84 | return nil 85 | }() 86 | gomega.Expect(err).ToNot(gomega.HaveOccurred()) 87 | }) 88 | 89 | func toManifest(object runtime.Object) workv1.Manifest { 90 | manifest := workv1.Manifest{} 91 | manifest.Object = object 92 | return manifest 93 | } 94 | 95 | func newConfigmap(name string) *corev1.ConfigMap { 96 | return &corev1.ConfigMap{ 97 | TypeMeta: metav1.TypeMeta{ 98 | Kind: "ConfigMap", 99 | APIVersion: "v1", 100 | }, 101 | ObjectMeta: metav1.ObjectMeta{ 102 | Namespace: defaultNamespace, 103 | Name: name, 104 | }, 105 | Data: map[string]string{ 106 | "test": "I'm a test configmap", 107 | }, 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /test/integration/hack/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." 
; pwd -P)" 4 | 5 | managed_cluster="integration-test" 6 | 7 | rm -rf ${REPO_DIR}/_output 8 | kind delete clusters $managed_cluster 9 | -------------------------------------------------------------------------------- /test/integration/hack/integration.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # TODO move this to e2e, for integration we should focus on code level test 4 | 5 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." ; pwd -P)" 6 | 7 | output="${REPO_DIR}/_output" 8 | agent_dir="${output}/agent" 9 | 10 | mkdir -p ${agent_dir} 11 | 12 | managed_cluster="integration-test" 13 | controlplane_kubeconfig="${output}/controlplane/.ocm/cert/kube-aggregator.kubeconfig" 14 | kubeconfig="${agent_dir}/${managed_cluster}.kubeconfig" 15 | 16 | source "${REPO_DIR}/test/bin/util.sh" 17 | ensure_clusteradm 18 | 19 | echo "Create a cluster with kind ..." 20 | kind create cluster --name $managed_cluster --kubeconfig $kubeconfig 21 | 22 | echo "Start controlplane with command ..." 23 | ${REPO_DIR}/hack/start-multicluster-controlplane.sh & 24 | pid=$! 25 | 26 | wait_command "cat ${output}/controlplane/controlplane_pid" 27 | if [ 0 -ne $? ]; then 28 | echo "Failed to start controlplane" 29 | cat /tmp/kube-apiserver.log 30 | exit 1 31 | fi 32 | cat ${output}/controlplane/controlplane_pid 33 | 34 | apiserver=$(kubectl config view --kubeconfig ${controlplane_kubeconfig} -ojsonpath='{.clusters[0].cluster.server}') 35 | echo "Joining the managed cluster $managed_cluster to ${apiserver} with clusteradm" 36 | token_output=$(${output}/bin/clusteradm --kubeconfig=${controlplane_kubeconfig} get token --use-bootstrap-token) 37 | token=$(echo $token_output | awk -F ' ' '{print $1}' | awk -F '=' '{print $2}') 38 | ${output}/bin/clusteradm --kubeconfig=${kubeconfig} join --hub-token $token --hub-apiserver "${apiserver}" --cluster-name $managed_cluster --wait 39 | if [ 0 -ne $? 
]; then 40 | echo "Failed to join managed cluster $managed_cluster" 41 | exit 1 42 | fi 43 | ${output}/bin/clusteradm --kubeconfig=${controlplane_kubeconfig} accept --clusters $managed_cluster 44 | if [ 0 -ne $? ]; then 45 | echo "Failed to accept managed cluster $managed_cluster" 46 | exit 1 47 | fi 48 | ${output}/bin/clusteradm --kubeconfig=${kubeconfig} unjoin --cluster-name=$managed_cluster 49 | if [ 0 -ne $? ]; then 50 | echo "Failed to unjoin managed cluster $managed_cluster" 51 | exit 1 52 | fi 53 | 54 | echo "Stop the controlplane ..." 55 | kill $pid 56 | -------------------------------------------------------------------------------- /test/performance/README.md: -------------------------------------------------------------------------------- 1 | [comment]: # ( Copyright Contributors to the Open Cluster Management project ) 2 | # Performance test 3 | We can use the `perftool` to do the performance test for the multicluster controlplane 4 | 5 | ## Required 6 | 7 | - One Kubernetes cluster, we will deploy the multicluster controlplane on that. 8 | - [Kind](https://kind.sigs.k8s.io/) environment, we will create one kind cluster for performance test. 9 | 10 | ## Run 11 | 12 | 1. export your Kubernetes cluster kubeconfig with `KUBECONFIG`. 13 | 2. run `make test-performance` to start the performance test. 14 | 15 | ## Configuration 16 | 17 | By default, this performance test creates 1000 clusters and generates 5 manifest works for each cluster, you can 18 | configure the `--count` and `--work-count` in the `hack/performance.sh` to modify the cluster and manifestwork counts.
19 | 20 | By default, we use the below manifestwork as the workload 21 | 22 | ```yaml 23 | apiVersion: work.open-cluster-management.io/v1 24 | kind: ManifestWork 25 | metadata: 26 | name: perftest--work- 27 | spec: 28 | workload: 29 | manifests: 30 | - apiVersion: v1 31 | kind: ConfigMap 32 | metadata: 33 | name: perftest--work- 34 | namespace: default 35 | data: 36 | test-data: "I'm a test configmap" 37 | ``` 38 | 39 | You can customize your own workload by following steps 40 | 41 | 1. Define your manifestworks in a directory and use `perftest.open-cluster-management.io/expected-work-count` annotation 42 | to specify your expected manifestwork counts, e.g. 43 | 44 | ```yaml 45 | apiVersion: work.open-cluster-management.io/v1 46 | kind: ManifestWork 47 | metadata: 48 | name: work-1 49 | annotations: 50 | "perftest.open-cluster-management.io/expected-work-count": "10" 51 | spec: 52 | workload: 53 | manifests: 54 | 55 | --- 56 | 57 | apiVersion: work.open-cluster-management.io/v1 58 | kind: ManifestWork 59 | metadata: 60 | name: work-2 61 | annotations: 62 | "perftest.open-cluster-management.io/expected-work-count": "20" 63 | spec: 64 | workload: 65 | manifests: 66 | ``` 67 | 68 | 2. 
Use `--work-template-dir` flag to specify your manifestworks directory path, then the `perftool` will generate the 69 | manifest works with your expected work count on each cluster, and the generated work will use a `configmap` to save your 70 | manifestwork to simulate the same size with your customized workloads, for above example, the `perftool` will generate 71 | 10 manifest works for `work-1` and 20 manifest works `work-2` on each cluster, and the generated manifest work will be 72 | 73 | ```yaml 74 | apiVersion: work.open-cluster-management.io/v1 75 | kind: ManifestWork 76 | metadata: 77 | name: perftest--work-work-1- 78 | spec: 79 | workload: 80 | - apiVersion: v1 81 | kind: ConfigMap 82 | metadata: 83 | name: perftest--work-1- 84 | namespace: default 85 | data: 86 | test-data: |- 87 | 88 | 89 | --- 90 | 91 | apiVersion: work.open-cluster-management.io/v1 92 | kind: ManifestWork 93 | metadata: 94 | name: -work-work-2- 95 | spec: 96 | workload: 97 | - apiVersion: v1 98 | kind: ConfigMap 99 | metadata: 100 | name: perftest--work-2- 101 | namespace: default 102 | data: 103 | test-data: |- 104 | 105 | ``` 106 | 107 | ## Test Result 108 | 109 | There is a [doc](https://docs.google.com/spreadsheets/d/11GcIXAxPpQlu35VWnN5sVtqrtkM0EYm3rqj8sTz2Pvs/edit#gid=0) to record our test result. 
110 | -------------------------------------------------------------------------------- /test/performance/cluster/cleanup.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package cluster 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | 9 | "github.com/spf13/pflag" 10 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 11 | "k8s.io/client-go/kubernetes" 12 | "k8s.io/client-go/tools/clientcmd" 13 | 14 | clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" 15 | "open-cluster-management.io/multicluster-controlplane/test/performance/utils" 16 | ) 17 | 18 | type clusterCleanupOptions struct { 19 | HubKubeconfig string 20 | 21 | hubKubeClient kubernetes.Interface 22 | hubClusterClient clusterclient.Interface 23 | } 24 | 25 | func NewClusterCleanupOptions() *clusterCleanupOptions { 26 | return &clusterCleanupOptions{} 27 | } 28 | 29 | func (o *clusterCleanupOptions) Complete() error { 30 | if o.HubKubeconfig == "" { 31 | return fmt.Errorf("flag `--controlplane-kubeconfig` is requried") 32 | } 33 | 34 | hubConfig, err := clientcmd.BuildConfigFromFlags("", o.HubKubeconfig) 35 | if err != nil { 36 | return fmt.Errorf("failed to build hub kubeconfig with %s, %v", o.HubKubeconfig, err) 37 | } 38 | 39 | o.hubKubeClient, err = kubernetes.NewForConfig(hubConfig) 40 | if err != nil { 41 | return fmt.Errorf("failed to build hub kube client with %s, %v", o.HubKubeconfig, err) 42 | } 43 | 44 | o.hubClusterClient, err = clusterclient.NewForConfig(hubConfig) 45 | if err != nil { 46 | return fmt.Errorf("failed to build hub cluster client with %s, %v", o.HubKubeconfig, err) 47 | } 48 | 49 | return nil 50 | } 51 | 52 | func (o *clusterCleanupOptions) AddFlags(fs *pflag.FlagSet) { 53 | fs.StringVar(&o.HubKubeconfig, "controlplane-kubeconfig", o.HubKubeconfig, "The kubeconfig of multicluster controlplane") 54 | } 55 | 56 | func (o *clusterCleanupOptions) Run() 
error { 57 | ctx := context.Background() 58 | clusters, err := o.hubClusterClient.ClusterV1().ManagedClusters().List(ctx, metav1.ListOptions{ 59 | LabelSelector: fmt.Sprintf("%s=true", performanceTestLabel), 60 | }) 61 | if err != nil { 62 | return err 63 | } 64 | 65 | for _, cluster := range clusters.Items { 66 | if err := o.hubClusterClient.ClusterV1().ManagedClusters().Delete(ctx, cluster.Name, metav1.DeleteOptions{}); err != nil { 67 | return err 68 | } 69 | 70 | utils.PrintMsg(fmt.Sprintf("Cluster %q is deleted", cluster.Name)) 71 | 72 | if err := o.hubKubeClient.CoreV1().Namespaces().Delete(ctx, cluster.Name, metav1.DeleteOptions{}); err != nil { 73 | return err 74 | } 75 | 76 | utils.PrintMsg(fmt.Sprintf("Cluster namespace %q is deleted", cluster.Name)) 77 | 78 | } 79 | return nil 80 | } 81 | -------------------------------------------------------------------------------- /test/performance/cmd/cleanup.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package cmd 4 | 5 | import ( 6 | "github.com/spf13/cobra" 7 | 8 | "open-cluster-management.io/multicluster-controlplane/test/performance/cluster" 9 | ) 10 | 11 | func NewCleanupCommand() *cobra.Command { 12 | options := cluster.NewClusterCleanupOptions() 13 | cmd := &cobra.Command{ 14 | Use: "cleanup", 15 | Short: "Cleanup clusters in the controlplane", 16 | RunE: func(cmd *cobra.Command, args []string) error { 17 | if err := options.Complete(); err != nil { 18 | return err 19 | } 20 | return options.Run() 21 | }, 22 | } 23 | options.AddFlags(cmd.Flags()) 24 | return cmd 25 | } 26 | -------------------------------------------------------------------------------- /test/performance/cmd/create.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package cmd 4 | 5 | import ( 6 | "context" 7 | 8 | 
"github.com/spf13/cobra" 9 | "k8s.io/apiserver/pkg/server" 10 | 11 | "open-cluster-management.io/multicluster-controlplane/test/performance/cluster" 12 | ) 13 | 14 | func NewCreateCommand() *cobra.Command { 15 | options := cluster.NewClusterRunOptions() 16 | cmd := &cobra.Command{ 17 | Use: "create", 18 | Short: "Create clusters in the controlplane", 19 | RunE: func(cmd *cobra.Command, args []string) error { 20 | if err := options.Complete(); err != nil { 21 | return err 22 | } 23 | 24 | if err := options.Validate(); err != nil { 25 | return err 26 | } 27 | 28 | shutdownCtx, cancel := context.WithCancel(context.TODO()) 29 | shutdownHandler := server.SetupSignalHandler() 30 | go func() { 31 | defer cancel() 32 | <-shutdownHandler 33 | }() 34 | 35 | ctx, terminate := context.WithCancel(shutdownCtx) 36 | defer terminate() 37 | 38 | return options.Run(ctx) 39 | }, 40 | } 41 | 42 | options.AddFlags(cmd.Flags()) 43 | return cmd 44 | } 45 | -------------------------------------------------------------------------------- /test/performance/hack/performance.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_DIR="$(cd "$(dirname ${BASH_SOURCE[0]})/../../.." 
; pwd -P)"

source "${REPO_DIR}/test/bin/util.sh"

# Working directory for the performance-test artifacts; the output suffix
# makes repeated runs distinguishable (override via PERF_TEST_OUTPUT_SUFFIX).
workdir=$REPO_DIR/_output/performance
output_suffix=${PERF_TEST_OUTPUT_SUFFIX:-"$(date '+%Y%m%dT%H%M%S')"}

mkdir -p $workdir

# KUBECONFIG points at the cluster that will host the controlplane.
if [ -z "$KUBECONFIG" ]; then
    echo "KUBECONFIG is required for running controlplane"
    exit 1
fi

echo "##Deploy controlplane on $KUBECONFIG"
# Remove state left over from a previous deployment.
rm -f $REPO_DIR/multicluster-controlplane.kubeconfig
rm -f $REPO_DIR/hack/deploy/controlplane/ocmconfig.yaml

# deploy multicluster-controlplane
kubectl delete ns multicluster-controlplane --ignore-not-found --wait
kubectl create ns multicluster-controlplane

helm install charts/multicluster-controlplane \
    -n multicluster-controlplane \
    --set route.enabled=true \
    --set enableSelfManagement=false \
    --generate-name

# wait for multicluster-controlplane ready, then export its kubeconfig
wait_command "kubectl -n multicluster-controlplane get secrets multicluster-controlplane-kubeconfig"
hubkubeconfig="${workdir}/multicluster-controlplane.kubeconfig"
kubectl -n multicluster-controlplane get secrets multicluster-controlplane-kubeconfig -ojsonpath='{.data.kubeconfig}' | base64 -d > ${hubkubeconfig}

wait_command "kubectl --kubeconfig ${hubkubeconfig} get crds"

# prepare spoke cluster for test
kind delete clusters perf
kind create cluster --name perf --kubeconfig ${workdir}/perf.kubeconfig

kubectl --kubeconfig ${workdir}/perf.kubeconfig delete namespace open-cluster-management-agent --ignore-not-found
kubectl --kubeconfig ${workdir}/perf.kubeconfig create namespace open-cluster-management-agent

# drop any agent state left over from a previous run
rm -rf /tmp/performance-test-agent

perftool=$REPO_DIR/bin/perftool
logfile="${workdir}/perf-tool-${output_suffix}.log"

# Start the cluster/work creation; perftool stderr goes to $logfile.
$perftool create \
    --kubeconfig=$KUBECONFIG \
    --controlplane-kubeconfig=${hubkubeconfig} \
    --spoke-kubeconfig=${workdir}/perf.kubeconfig \
--count=1000 \ 55 | --work-count=5 \ 56 | --output-file-suffix=$output_suffix \ 57 | --output-dir=$workdir 2>$logfile 58 | -------------------------------------------------------------------------------- /test/performance/metrics/metrics.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | package metrics 3 | 4 | import ( 5 | "context" 6 | "fmt" 7 | "strings" 8 | 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/client-go/discovery" 11 | "k8s.io/client-go/tools/clientcmd" 12 | metricsapi "k8s.io/metrics/pkg/apis/metrics" 13 | metricsv1beta1api "k8s.io/metrics/pkg/apis/metrics/v1beta1" 14 | metricsclientset "k8s.io/metrics/pkg/client/clientset/versioned" 15 | 16 | "open-cluster-management.io/multicluster-controlplane/test/performance/utils" 17 | ) 18 | 19 | const labelSelector = "app=multicluster-controlplane" 20 | 21 | var supportedMetricsAPIVersions = []string{ 22 | "v1beta1", 23 | } 24 | 25 | type MetricsRecorder struct { 26 | namespace string 27 | metricsClient metricsclientset.Interface 28 | } 29 | 30 | func BuildMetricsGetter(kubeConfig, namespace string) (*MetricsRecorder, error) { 31 | config, err := clientcmd.BuildConfigFromFlags("", kubeConfig) 32 | if err != nil { 33 | return nil, fmt.Errorf("failed to build kubeconfig with %s, %v", kubeConfig, err) 34 | } 35 | 36 | discoveryClient, err := discovery.NewDiscoveryClientForConfig(config) 37 | if err != nil { 38 | return nil, fmt.Errorf("failed to build discovery client with %s, %v", kubeConfig, err) 39 | } 40 | 41 | apiGroups, err := discoveryClient.ServerGroups() 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | if !metricsAPIAvailable(apiGroups) { 47 | return nil, fmt.Errorf("metrics API not available on the %s", kubeConfig) 48 | } 49 | 50 | metricsClient, err := metricsclientset.NewForConfig(config) 51 | if err != nil { 52 | return nil, err 53 | } 54 | 55 | return 
&MetricsRecorder{ 56 | namespace: namespace, 57 | metricsClient: metricsClient, 58 | }, nil 59 | } 60 | 61 | func (g *MetricsRecorder) Record(ctx context.Context, filename string, clusterCounts int) error { 62 | versionedMetrics, err := g.metricsClient.MetricsV1beta1().PodMetricses(g.namespace).List(ctx, metav1.ListOptions{ 63 | LabelSelector: labelSelector}) 64 | if err != nil { 65 | return fmt.Errorf("failed to list metrics in the namespace %s with %s, %v", g.namespace, labelSelector, err) 66 | } 67 | 68 | metrics := &metricsapi.PodMetricsList{} 69 | err = metricsv1beta1api.Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(versionedMetrics, metrics, nil) 70 | if err != nil { 71 | return fmt.Errorf("failed to convert metrics, %v", err) 72 | } 73 | 74 | for _, m := range metrics.Items { 75 | for _, c := range m.Containers { 76 | if c.Name == "POD" { 77 | continue 78 | } 79 | 80 | memory, ok := c.Usage.Memory().AsInt64() 81 | if !ok { 82 | utils.PrintMsg(fmt.Sprintf("container=%s, cpu=%s, memory=unknown", c.Name, c.Usage.Cpu())) 83 | continue 84 | } 85 | 86 | // millicore 87 | cpu := c.Usage.Cpu() 88 | // megabytes 89 | memory = memory / 1024 / 1024 90 | utils.PrintMsg(fmt.Sprintf("container=%s, counts=%d, cpu=%s, memory=%dMi", 91 | c.Name, clusterCounts, cpu, memory)) 92 | if err := utils.AppendRecordToFile(filename, fmt.Sprintf("%d,%s,%d", 93 | clusterCounts, strings.ReplaceAll(cpu.String(), "m", ""), memory)); err != nil { 94 | return fmt.Errorf("failed to dump metrics to file, %v", err) 95 | } 96 | } 97 | } 98 | 99 | return nil 100 | } 101 | 102 | func metricsAPIAvailable(discoveredAPIGroups *metav1.APIGroupList) bool { 103 | for _, discoveredAPIGroup := range discoveredAPIGroups.Groups { 104 | if discoveredAPIGroup.Name != metricsapi.GroupName { 105 | continue 106 | } 107 | for _, version := range discoveredAPIGroup.Versions { 108 | for _, supportedVersion := range supportedMetricsAPIVersions { 109 | if version.Version == supportedVersion { 110 | return 
true 111 | } 112 | } 113 | } 114 | } 115 | return false 116 | } 117 | -------------------------------------------------------------------------------- /test/performance/perftool.go: -------------------------------------------------------------------------------- 1 | // Copyright Contributors to the Open Cluster Management project 2 | 3 | package main 4 | 5 | import ( 6 | "fmt" 7 | "os" 8 | 9 | "github.com/spf13/cobra" 10 | "k8s.io/klog/v2" 11 | 12 | "open-cluster-management.io/multicluster-controlplane/test/performance/cmd" 13 | ) 14 | 15 | func main() { 16 | rootCmd := &cobra.Command{ 17 | Use: "perftool", 18 | Short: "A preformance test tool for controlplane", 19 | Run: func(cmd *cobra.Command, args []string) { 20 | if err := cmd.Help(); err != nil { 21 | fmt.Fprintf(os.Stdout, "%v\n", err) 22 | } 23 | 24 | os.Exit(1) 25 | }, 26 | } 27 | 28 | klog.InitFlags(nil) 29 | klog.LogToStderr(true) 30 | 31 | rootCmd.AddCommand(cmd.NewCreateCommand()) 32 | rootCmd.AddCommand(cmd.NewCleanupCommand()) 33 | 34 | if err := rootCmd.Execute(); err != nil { 35 | fmt.Fprintln(os.Stdout, err) 36 | os.Exit(1) 37 | } 38 | } 39 | --------------------------------------------------------------------------------