├── .github └── workflows │ ├── build.yml │ ├── build_tag.yml │ ├── certification.yml │ ├── test.yml │ └── upload_release_assets.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── NOTICE ├── README.md ├── VERSION ├── build ├── Dockerfile └── bin │ ├── entrypoint │ └── user_setup ├── bundle ├── generate_bundle.sh ├── kubernetes │ ├── bundle.Dockerfile │ ├── manifests │ │ ├── configmap.yaml │ │ ├── lb-secret.yaml │ │ ├── nsx-container-plugin-operator.clusterserviceversion.yaml │ │ ├── nsx-secret.yaml │ │ └── operator.nsx.vmware.com_ncpinstalls_crd.yaml │ ├── metadata │ │ └── annotations.yaml │ └── tests │ │ └── scorecard │ │ └── config.yaml └── openshift4 │ ├── bundle.Dockerfile │ ├── manifests │ ├── configmap.yaml │ ├── lb-secret.yaml │ ├── nsx-container-plugin-operator.clusterserviceversion.yaml │ ├── nsx-secret.yaml │ └── operator.nsx.vmware.com_ncpinstalls_crd.yaml │ ├── metadata │ └── annotations.yaml │ └── tests │ └── scorecard │ └── config.yaml ├── cmd └── manager │ └── main.go ├── deploy ├── kubernetes │ ├── configmap.yaml │ ├── lb-secret.yaml │ ├── namespace.yaml │ ├── nsx-secret.yaml │ ├── operator.nsx.vmware.com_ncpinstalls_crd.yaml │ ├── operator.nsx.vmware.com_v1_ncpinstall_cr.yaml │ ├── operator.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml └── openshift4 │ ├── configmap.yaml │ ├── lb-secret.yaml │ ├── namespace.yaml │ ├── nsx-secret.yaml │ ├── operator.nsx.vmware.com_ncpinstalls_crd.yaml │ ├── operator.nsx.vmware.com_v1_ncpinstall_cr.yaml │ ├── operator.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── go.mod ├── go.sum ├── hack ├── certify-operator-ocp.sh ├── get-kustomize.sh └── prepare-assets.sh ├── manifest ├── kubernetes │ ├── rhel │ │ └── ncp-rhel.yaml │ └── ubuntu │ │ └── ncp-ubuntu.yaml └── openshift4 │ └── coreos │ └── ncp-openshift4.yaml ├── olm-catalog ├── 0.1.0 │ ├── nsx-container-plugin-operator.v0.1.0.clusterserviceversion.yaml │ └── 
operator.nsx.vmware.com_ncpinstalls_crd.yaml ├── 0.2.0 │ └── nsx-container-plugin-operator.v0.2.0.clusterserviceversion.yaml ├── make_zip_bundle.py └── nsx-container-plugin-operator.package.yaml ├── pkg ├── apis │ ├── addtoscheme_operator_v1.go │ ├── apis.go │ └── operator │ │ ├── group.go │ │ └── v1 │ │ ├── doc.go │ │ ├── ncpinstall_types.go │ │ ├── register.go │ │ └── zz_generated.deepcopy.go ├── controller │ ├── add_configmap.go │ ├── add_node.go │ ├── add_pod.go │ ├── configmap │ │ ├── config.go │ │ ├── config_test.go │ │ ├── configmap_controller.go │ │ └── configmap_controller_test.go │ ├── controller.go │ ├── node │ │ └── node_controller.go │ ├── pod │ │ ├── pod_controller.go │ │ └── pod_controller_test.go │ ├── sharedinfo │ │ └── shared_info.go │ └── statusmanager │ │ ├── node_status.go │ │ ├── pod_status.go │ │ ├── status_manager.go │ │ ├── status_manager_test.go │ │ └── test_utils.go └── types │ ├── defaults.go │ ├── names.go │ ├── utils.go │ └── utils_test.go ├── tools.go ├── version └── version.go └── versioning.mk /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: Build image 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - main 7 | push: 8 | branches: 9 | - main 10 | 11 | jobs: 12 | build-image: 13 | runs-on: [ubuntu-latest] 14 | steps: 15 | - name: Set up Go 1.23 16 | uses: actions/setup-go@v4 17 | with: 18 | go-version: 1.23.1 19 | cache: false 20 | - name: Check-out code 21 | uses: actions/checkout@v4 22 | - name: Build image 23 | run: make 24 | - name: Push images to registry 25 | if: ${{ github.repository == 'vmware/nsx-container-plugin-operator' && github.event_name == 'push' && github.ref == 'refs/heads/main' }} 26 | env: 27 | DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} 28 | DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} 29 | run: | 30 | echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin 31 | docker push 
vmware/nsx-container-plugin-operator:latest 32 | -------------------------------------------------------------------------------- /.github/workflows/build_tag.yml: -------------------------------------------------------------------------------- 1 | name: Build and push a release image 2 | 3 | on: 4 | push: 5 | tags: 6 | - v* 7 | 8 | jobs: 9 | build: 10 | runs-on: [ubuntu-latest] 11 | steps: 12 | - name: Set up Go 1.23 13 | uses: actions/setup-go@v4 14 | with: 15 | go-version: 1.23.1 16 | cache: false 17 | - uses: actions/checkout@v4 18 | - name: Build Operator image and push to registry 19 | env: 20 | DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} 21 | DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} 22 | TAG: ${{ github.ref }} 23 | run: | 24 | VERSION="${TAG:10}" make 25 | echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin 26 | docker push vmware/nsx-container-plugin-operator:"${TAG:10}" 27 | -------------------------------------------------------------------------------- /.github/workflows/certification.yml: -------------------------------------------------------------------------------- 1 | name: Certify container with OpenShift 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | version_tag: 7 | description: 'Version tag' 8 | required: true 9 | default: 'latest' 10 | 11 | jobs: 12 | validate_image: 13 | runs-on: [ubuntu-latest] 14 | steps: 15 | - uses: actions/checkout@v4 16 | - name: Run container certification 17 | env: 18 | VERSION: ${{ github.event.inputs.version_tag }} 19 | REGISTRY_LOGIN_USERNAME: ${{ secrets.REGISTRY_LOGIN_USERNAME }} 20 | REGISTRY_LOGIN_PASSWORD: ${{ secrets.REGISTRY_LOGIN_PASSWORD }} 21 | PFLT_PYXIS_API_TOKEN: ${{ secrets.PFLT_PYXIS_API_TOKEN }} 22 | PFLT_CERTIFICATION_PROJECT_ID: ${{ secrets.PFLT_CERTIFICATION_PROJECT_ID }} 23 | run: | 24 | ./hack/certify-operator-ocp.sh 25 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: 
-------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | pull_request: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | test-unit: 10 | name: Unit test 11 | runs-on: [ubuntu-latest] 12 | steps: 13 | - name: Set up Go 1.23 14 | uses: actions/setup-go@v4 15 | with: 16 | go-version: 1.23.1 17 | cache: false 18 | - name: Check-out code 19 | uses: actions/checkout@v4 20 | - name: Run unit tests 21 | run: make test-unit 22 | -------------------------------------------------------------------------------- /.github/workflows/upload_release_assets.yml: -------------------------------------------------------------------------------- 1 | name: Upload assets to release 2 | 3 | on: 4 | release: 5 | types: 6 | - created 7 | 8 | jobs: 9 | build: 10 | runs-on: [ubuntu-latest] 11 | steps: 12 | - uses: actions/checkout@v4 13 | - name: Build assets 14 | env: 15 | TAG: ${{ github.ref }} 16 | run: | 17 | mkdir assets 18 | VERSION="${TAG:10}" ./hack/prepare-assets.sh ./assets 19 | - name: Upload openshift4 tarball 20 | uses: actions/upload-release-asset@v1 21 | env: 22 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 23 | with: 24 | upload_url: ${{ github.event.release.upload_url }} 25 | asset_path: ./assets/openshift4.tar.gz 26 | asset_name: openshift4.tar.gz 27 | asset_content_type: application/octet-stream 28 | - name: Upload kubernetes tarball 29 | uses: actions/upload-release-asset@v1 30 | env: 31 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 32 | with: 33 | upload_url: ${{ github.event.release.upload_url }} 34 | asset_path: ./assets/kubernetes.tar.gz 35 | asset_name: kubernetes.tar.gz 36 | asset_content_type: application/octet-stream 37 | 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | build/bin/nsx-ncp-operator 3 | 4 | 
-------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Contributor Covenant Code of Conduct 4 | 5 | ## Our Pledge 6 | 7 | In the interest of fostering an open and welcoming environment, we as 8 | contributors and maintainers pledge to making participation in nsx-container-plugin-operator project and 9 | our community a harassment-free experience for everyone, regardless of age, body 10 | size, disability, ethnicity, sex characteristics, gender identity and expression, 11 | level of experience, education, socio-economic status, nationality, personal 12 | appearance, race, religion, or sexual identity and orientation. 13 | 14 | ## Our Standards 15 | 16 | Examples of behavior that contributes to creating a positive environment 17 | include: 18 | 19 | * Using welcoming and inclusive language 20 | * Being respectful of differing viewpoints and experiences 21 | * Gracefully accepting constructive criticism 22 | * Focusing on what is best for the community 23 | * Showing empathy towards other community members 24 | 25 | Examples of unacceptable behavior by participants include: 26 | 27 | * The use of sexualized language or imagery and unwelcome sexual attention or 28 | advances 29 | * Trolling, insulting/derogatory comments, and personal or political attacks 30 | * Public or private harassment 31 | * Publishing others' private information, such as a physical or electronic 32 | address, without explicit permission 33 | * Other conduct which could reasonably be considered inappropriate in a 34 | professional setting 35 | 36 | ## Our Responsibilities 37 | 38 | Project maintainers are responsible for clarifying the standards of acceptable 39 | behavior and are expected to take appropriate and fair corrective action in 40 | response to any instances of unacceptable behavior. 
41 | 42 | Project maintainers have the right and responsibility to remove, edit, or 43 | reject comments, commits, code, wiki edits, issues, and other contributions 44 | that are not aligned to this Code of Conduct, or to ban temporarily or 45 | permanently any contributor for other behaviors that they deem inappropriate, 46 | threatening, offensive, or harmful. 47 | 48 | ## Scope 49 | 50 | This Code of Conduct applies both within project spaces and in public spaces 51 | when an individual is representing the project or its community. Examples of 52 | representing a project or community include using an official project e-mail 53 | address, posting via an official social media account, or acting as an appointed 54 | representative at an online or offline event. Representation of a project may be 55 | further defined and clarified by project maintainers. 56 | 57 | ## Enforcement 58 | 59 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 60 | reported by contacting the project team at oss-coc@vmware.com. All 61 | complaints will be reviewed and investigated and will result in a response that 62 | is deemed necessary and appropriate to the circumstances. The project team is 63 | obligated to maintain confidentiality with regard to the reporter of an incident. 64 | Further details of specific enforcement policies may be posted separately. 65 | 66 | Project maintainers who do not follow or enforce the Code of Conduct in good 67 | faith may face temporary or permanent repercussions as determined by other 68 | members of the project's leadership. 
69 | 70 | ## Attribution 71 | 72 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 73 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 74 | 75 | [homepage]: https://www.contributor-covenant.org 76 | 77 | For answers to common questions about this code of conduct, see 78 | https://www.contributor-covenant.org/faq 79 | 80 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Contributing to nsx-container-plugin-operator 4 | 5 | The nsx-container-plugin-operator project team welcomes contributions from the community. If you wish to contribute code and you have not signed our contributor license agreement (CLA), our bot will update the issue when you open a Pull Request. For any questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). 6 | 7 | ## Contribution Flow 8 | 9 | This is a rough outline of what a contributor's workflow looks like: 10 | 11 | - Create a topic branch from where you want to base your work 12 | - Make commits of logical units 13 | - Make sure your commit messages are in the proper format (see below) 14 | - Push your changes to a topic branch in your fork of the repository 15 | - Submit a pull request 16 | 17 | Example: 18 | 19 | ``` shell 20 | git remote add upstream https://github.com/vmware/nsx-container-plugin-operator.git 21 | git checkout -b my-new-feature main 22 | git commit -a 23 | git push origin my-new-feature 24 | ``` 25 | 26 | ### Staying In Sync With Upstream 27 | 28 | When your branch gets out of sync with the vmware/main branch, use the following to update: 29 | 30 | ``` shell 31 | git checkout my-new-feature 32 | git fetch -a 33 | git pull --rebase upstream main 34 | git push --force-with-lease origin my-new-feature 35 | ``` 36 | 37 | ### Updating pull requests 38 | 39 | If your PR fails 
to pass CI or needs changes based on code review, you'll most likely want to squash these changes into 40 | existing commits. 41 | 42 | If your pull request contains a single commit or your changes are related to the most recent commit, you can simply 43 | amend the commit. 44 | 45 | ``` shell 46 | git add . 47 | git commit --amend 48 | git push --force-with-lease origin my-new-feature 49 | ``` 50 | 51 | If you need to squash changes into an earlier commit, you can use: 52 | 53 | ``` shell 54 | git add . 55 | git commit --fixup 56 | git rebase -i --autosquash main 57 | git push --force-with-lease origin my-new-feature 58 | ``` 59 | 60 | Be sure to add a comment to the PR indicating your new changes are ready to review, as GitHub does not generate a 61 | notification when you git push. 62 | 63 | ### Code Style 64 | 65 | ### Formatting Commit Messages 66 | 67 | We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/). 68 | 69 | Be sure to include any related GitHub issue references in the commit message. See 70 | [GFM syntax](https://guides.github.com/features/mastering-markdown/#GitHub-flavored-markdown) for referencing issues 71 | and commits. 72 | 73 | ## Reporting Bugs and Creating Issues 74 | 75 | When opening a new issue, try to roughly follow the commit message format conventions above. 76 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | nsx-container-plugin-operator 2 | 3 | Copyright (c) 2021 VMware, Inc. All rights reserved. 4 | 5 | The Apache 2.0 license (the "License") set forth below applies to all parts of the nsx-container-plugin-operator project. You may not use this file except in compliance with the License. 
6 | 7 | Apache License 8 | 9 | Version 2.0, January 2004 10 | http://www.apache.org/licenses/ 11 | 12 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 13 | 14 | 1. Definitions. 15 | 16 | "License" shall mean the terms and conditions for use, reproduction, 17 | and distribution as defined by Sections 1 through 9 of this document. 18 | 19 | "Licensor" shall mean the copyright owner or entity authorized by the 20 | copyright owner that is granting the License. 21 | 22 | "Legal Entity" shall mean the union of the acting entity and all other 23 | entities that control, are controlled by, or are under common control 24 | with that entity. For the purposes of this definition, "control" means 25 | (i) the power, direct or indirect, to cause the direction or management 26 | of such entity, whether by contract or otherwise, or (ii) ownership 27 | of fifty percent (50%) or more of the outstanding shares, or (iii) 28 | beneficial ownership of such entity. 29 | 30 | "You" (or "Your") shall mean an individual or Legal Entity exercising 31 | permissions granted by this License. 32 | 33 | "Source" form shall mean the preferred form for making modifications, 34 | including but not limited to software source code, documentation source, 35 | and configuration files. 36 | 37 | "Object" form shall mean any form resulting from mechanical transformation 38 | or translation of a Source form, including but not limited to compiled 39 | object code, generated documentation, and conversions to other media 40 | types. 41 | 42 | "Work" shall mean the work of authorship, whether in Source or 43 | Object form, made available under the License, as indicated by a copyright 44 | notice that is included in or attached to the work (an example is provided 45 | in the Appendix below). 
46 | 47 | "Derivative Works" shall mean any work, whether in Source or Object form, 48 | that is based on (or derived from) the Work and for which the editorial 49 | revisions, annotations, elaborations, or other modifications represent, 50 | as a whole, an original work of authorship. For the purposes of this 51 | License, Derivative Works shall not include works that remain separable 52 | from, or merely link (or bind by name) to the interfaces of, the Work 53 | and Derivative Works thereof. 54 | 55 | "Contribution" shall mean any work of authorship, including the 56 | original version of the Work and any modifications or additions to 57 | that Work or Derivative Works thereof, that is intentionally submitted 58 | to Licensor for inclusion in the Work by the copyright owner or by an 59 | individual or Legal Entity authorized to submit on behalf of the copyright 60 | owner. For the purposes of this definition, "submitted" means any form of 61 | electronic, verbal, or written communication sent to the Licensor or its 62 | representatives, including but not limited to communication on electronic 63 | mailing lists, source code control systems, and issue tracking systems 64 | that are managed by, or on behalf of, the Licensor for the purpose of 65 | discussing and improving the Work, but excluding communication that is 66 | conspicuously marked or otherwise designated in writing by the copyright 67 | owner as "Not a Contribution." 68 | 69 | "Contributor" shall mean Licensor and any individual or Legal Entity 70 | on behalf of whom a Contribution has been received by Licensor and 71 | subsequently incorporated within the Work. 72 | 73 | 2. Grant of Copyright License. 
74 | Subject to the terms and conditions of this License, each Contributor 75 | hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, 76 | royalty-free, irrevocable copyright license to reproduce, prepare 77 | Derivative Works of, publicly display, publicly perform, sublicense, and 78 | distribute the Work and such Derivative Works in Source or Object form. 79 | 80 | 3. Grant of Patent License. 81 | Subject to the terms and conditions of this License, each Contributor 82 | hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, 83 | royalty- free, irrevocable (except as stated in this section) patent 84 | license to make, have made, use, offer to sell, sell, import, and 85 | otherwise transfer the Work, where such license applies only to those 86 | patent claims licensable by such Contributor that are necessarily 87 | infringed by their Contribution(s) alone or by combination of 88 | their Contribution(s) with the Work to which such Contribution(s) 89 | was submitted. If You institute patent litigation against any entity 90 | (including a cross-claim or counterclaim in a lawsuit) alleging that the 91 | Work or a Contribution incorporated within the Work constitutes direct 92 | or contributory patent infringement, then any patent licenses granted 93 | to You under this License for that Work shall terminate as of the date 94 | such litigation is filed. 95 | 96 | 4. Redistribution. 97 | You may reproduce and distribute copies of the Work or Derivative Works 98 | thereof in any medium, with or without modifications, and in Source or 99 | Object form, provided that You meet the following conditions: 100 | 101 | a. You must give any other recipients of the Work or Derivative Works 102 | a copy of this License; and 103 | 104 | b. You must cause any modified files to carry prominent notices stating 105 | that You changed the files; and 106 | 107 | c. 
You must retain, in the Source form of any Derivative Works that 108 | You distribute, all copyright, patent, trademark, and attribution 109 | notices from the Source form of the Work, excluding those notices 110 | that do not pertain to any part of the Derivative Works; and 111 | 112 | d. If the Work includes a "NOTICE" text file as part of its 113 | distribution, then any Derivative Works that You distribute must 114 | include a readable copy of the attribution notices contained 115 | within such NOTICE file, excluding those notices that do not 116 | pertain to any part of the Derivative Works, in at least one of 117 | the following places: within a NOTICE text file distributed as part 118 | of the Derivative Works; within the Source form or documentation, 119 | if provided along with the Derivative Works; or, within a display 120 | generated by the Derivative Works, if and wherever such third-party 121 | notices normally appear. The contents of the NOTICE file are for 122 | informational purposes only and do not modify the License. You 123 | may add Your own attribution notices within Derivative Works that 124 | You distribute, alongside or as an addendum to the NOTICE text 125 | from the Work, provided that such additional attribution notices 126 | cannot be construed as modifying the License. You may add Your own 127 | copyright statement to Your modifications and may provide additional 128 | or different license terms and conditions for use, reproduction, or 129 | distribution of Your modifications, or for any such Derivative Works 130 | as a whole, provided Your use, reproduction, and distribution of the 131 | Work otherwise complies with the conditions stated in this License. 132 | 133 | 5. Submission of Contributions. 
134 | Unless You explicitly state otherwise, any Contribution intentionally 135 | submitted for inclusion in the Work by You to the Licensor shall be 136 | under the terms and conditions of this License, without any additional 137 | terms or conditions. Notwithstanding the above, nothing herein shall 138 | supersede or modify the terms of any separate license agreement you may 139 | have executed with Licensor regarding such Contributions. 140 | 141 | 6. Trademarks. 142 | This License does not grant permission to use the trade names, trademarks, 143 | service marks, or product names of the Licensor, except as required for 144 | reasonable and customary use in describing the origin of the Work and 145 | reproducing the content of the NOTICE file. 146 | 147 | 7. Disclaimer of Warranty. 148 | Unless required by applicable law or agreed to in writing, Licensor 149 | provides the Work (and each Contributor provides its Contributions) on 150 | an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either 151 | express or implied, including, without limitation, any warranties or 152 | conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR 153 | A PARTICULAR PURPOSE. You are solely responsible for determining the 154 | appropriateness of using or redistributing the Work and assume any risks 155 | associated with Your exercise of permissions under this License. 156 | 157 | 8. Limitation of Liability. 
158 | In no event and under no legal theory, whether in tort (including 159 | negligence), contract, or otherwise, unless required by applicable law 160 | (such as deliberate and grossly negligent acts) or agreed to in writing, 161 | shall any Contributor be liable to You for damages, including any direct, 162 | indirect, special, incidental, or consequential damages of any character 163 | arising as a result of this License or out of the use or inability to 164 | use the Work (including but not limited to damages for loss of goodwill, 165 | work stoppage, computer failure or malfunction, or any and all other 166 | commercial damages or losses), even if such Contributor has been advised 167 | of the possibility of such damages. 168 | 169 | 9. Accepting Warranty or Additional Liability. 170 | While redistributing the Work or Derivative Works thereof, You may 171 | choose to offer, and charge a fee for, acceptance of support, warranty, 172 | indemnity, or other liability obligations and/or rights consistent with 173 | this License. However, in accepting such obligations, You may act only 174 | on Your own behalf and on Your sole responsibility, not on behalf of 175 | any other Contributor, and only if You agree to indemnify, defend, and 176 | hold each Contributor harmless for any liability incurred by, or claims 177 | asserted against, such Contributor by reason of your accepting any such 178 | warranty or additional liability. 179 | 180 | END OF TERMS AND CONDITIONS 181 | 182 | 183 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # go options 2 | GO ?= go 3 | LDFLAGS := 4 | GOFLAGS := 5 | BINDIR ?= $(CURDIR)/build/bin 6 | GO_FILES := $(shell find . 
-type d -name '.cache' -prune -o -type f -name '*.go' -print) 7 | GOPATH ?= $$($(GO) env GOPATH) 8 | 9 | .PHONY: all 10 | all: build 11 | 12 | include versioning.mk 13 | 14 | LDFLAGS += $(VERSION_LDFLAGS) 15 | OPERATOR_NAME = nsx-ncp-operator 16 | OPERATOR_IMG_NAME = vmware/nsx-container-plugin-operator 17 | 18 | .PHONY: build 19 | build: 20 | CGO_ENABLED=0 GOOS=linux $(GO) build -o $(BINDIR)/$(OPERATOR_NAME) $(GOFLAGS) -ldflags '$(LDFLAGS)' ./cmd/manager 21 | docker build -f build/Dockerfile . -t $(OPERATOR_IMG_NAME):$(DOCKER_IMG_VERSION) 22 | docker tag $(OPERATOR_IMG_NAME):$(DOCKER_IMG_VERSION) $(OPERATOR_IMG_NAME) 23 | 24 | .PHONY: bin 25 | bin: 26 | GOOS=linux $(GO) build -o $(BINDIR)/$(OPERATOR_NAME) $(GOFLAGS) -ldflags '$(LDFLAGS)' ./cmd/manager 27 | 28 | .PHONY: bundle 29 | bundle: 30 | ./bundle/generate_bundle.sh --bundle-repo $(BUNDLE_REPO) --bundle-image $(BUNDLE_IMG_NAME) --bundle-version $(BUNDLE_VERSION) 31 | 32 | .PHONY: test-unit 33 | test-unit: 34 | GOOS=linux $(GO) test -race -cover github.com/vmware/nsx-container-plugin-operator/pkg... 35 | 36 | .PHONY: clean 37 | clean: 38 | rm -f $(BINDIR)/$(OPERATOR_NAME) 39 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | nsx-container-plugin-operator 2 | 3 | Copyright (c) 2021 VMware, Inc. All Rights Reserved. 4 | 5 | This product is licensed to you under the Apache 2.0 license (the "License"). You may not use this product except in compliance with the Apache 2.0 License. 6 | 7 | This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file. 
8 | 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | - [NSX Container Plugin Operator](#nsx-container-plugin-operator) 2 | - [Overview](#overview) 3 | - [Try it out](#try-it-out) 4 | - [Preparing the operator image](#preparing-the-operator-image) 5 | - [Installing](#installing) 6 | - [Kubernetes](#kubernetes) 7 | - [Openshift](#openshift) 8 | - [Installing a cluster with user-provisioned infrastructure](#installing-a-cluster-with-user-provisioned-infrastructure) 9 | - [Installing a cluster with installer-provisioned infrastructure](#installing-a-cluster-with-installer-provisioned-infrastructure) 10 | - [Upgrade](#upgrade) 11 | - [Documentation](#documentation) 12 | - [Cluster network config (Openshift specific)](#cluster-network-config-openshift-specific) 13 | - [Operator ConfigMap](#operator-configmap) 14 | - [Kubernetes](#kubernetes-1) 15 | - [OpenShift](#openshift-1) 16 | - [NCP Image](#ncp-image) 17 | - [Unsafe changes](#unsafe-changes) 18 | - [Contributing](#contributing) 19 | - [License](#license) 20 | # NSX Container Plugin Operator 21 | 22 | [![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) 23 | 24 | ## Overview 25 | 26 | An operator for leveraging NSX as the default container networking solution for an 27 | Kubernetes/Openshift cluster. 
The operator will be deployed in the early phases of 28 | Openshift cluster deployment or after the kubectl is ready in Kubernetes cluster, 29 | and it will take care of deploying NSX integration components, and precisely: 30 | 31 | * The NSX container plugin (NCP) deployment 32 | * The nsx-ncp-bootstrap daemonset 33 | * The nsx-node-agent daemonset 34 | 35 | The nsx-container-plugin operator monitors a dedicated ConfigMap, applies changes 36 | to NCP and nsx-node-agent configuration, and creates/restarts the relevant pods 37 | so that the relevant configuration changes are picked up. 38 | 39 | The nsx-container-plugin operator also monitors the nsx-node-agent status and 40 | updates the network status on relevant nodes. 41 | 42 | In addition, the nsx-container-plugin operator is able to monitor nodes ensuring 43 | the corresponding NSX logical port is enabled as a container host logical port. 44 | 45 | For Openshift 4 clusters, the nsx-container-plugin operator especially monitors 46 | the `network.config.openshift.io` CR to update the container network CIDRs used by NCP. 47 | 48 | ## Try it out 49 | 50 | ### Preparing the operator image 51 | 52 | Pull the packed image for docker: 53 | ``` 54 | docker pull vmware/nsx-container-plugin-operator:latest 55 | ``` 56 | 57 | For containerd: 58 | ``` 59 | ctr image pull docker.io/vmware/nsx-container-plugin-operator:latest 60 | ``` 61 | 62 | Building the nsx-container-plugin operator is very simple. From the project root 63 | directory simply type the following command, which based on docker build tool. 64 | 65 | ``` 66 | make all 67 | ``` 68 | 69 | At the moment the nsx-container-plugin operator only works on native Kubernetes 70 | or Openshift 4 environments 71 | 72 | ### Installing 73 | 74 | #### Kubernetes 75 | 76 | Edit the operator yaml files in `deploy/kubernetes` then apply them. 77 | 78 | #### Openshift 79 | 80 | ##### Installing a cluster with user-provisioned infrastructure 81 | 82 | 1. 
Preparing install-config.yaml 83 | Generate install-config.yaml by using openshift-install command. 84 | ``` 85 | $ openshift-install --dir=$MY_CLUSTER create install-config 86 | ``` 87 | 88 | Edit `$MY_CLUSTER/install-config.yaml` to update networking section. 89 | Change `networkType` to `ncp`(case insensitive). 90 | Set container network CIDRs `clusterNetwork` in `$MY_CLUSTER/install-config.yaml`. 91 | 92 | 2. Creating manifest files: 93 | ``` 94 | $ openshift-install --dir=$MY_CLUSTER create manifests 95 | ``` 96 | 97 | If one cluster node has multiple VirtualNetworkInterfaces, the operator cannot 98 | detect which interface should be enabled as the containers' parent interface, 99 | so the user should edit `deploy/openshift4/operator.nsx.vmware.com_v1_ncpinstall_cr.yaml` 100 | to set `addNodeTag: false` and manually tag the target node port by 101 | `scope=ncp/node_name, tag=` and `scope=ncp/node_name, tag=` 102 | on NSX-T. 103 | 104 | Put operator yaml files from `deploy/openshift4/` to `$MY_CLUSTER/manifests`, 105 | edit configmap.yaml about operator configurations, add the operator image and 106 | NCP image in operator.yaml. 107 | 108 | 3. Generating ignition configuration files: 109 | ``` 110 | $ openshift-install --dir=$MY_CLUSTER create ignition-configs 111 | ``` 112 | This bootstrap ignition file will be added to the terraform tfvars. 113 | Then use terraform to install Openshift 4 cluster on vSphere. 114 | 115 | ##### Installing a cluster with installer-provisioned infrastructure 116 | 117 | 1. Prepare install-config.yaml 118 | This step is similar to UPI installation. 
An example of install-config.yaml: 119 | 120 | ``` 121 | apiVersion: v1 122 | baseDomain: openshift.test 123 | compute: 124 | - architecture: amd64 125 | hyperthreading: Enabled 126 | name: worker 127 | platform: {} 128 | replicas: 3 129 | controlPlane: 130 | architecture: amd64 131 | hyperthreading: Enabled 132 | name: master 133 | platform: {} 134 | replicas: 3 135 | metadata: 136 | creationTimestamp: null 137 | name: ipi 138 | networking: 139 | networkType: ncp 140 | clusterNetwork: 141 | - cidr: 10.0.0.0/14 142 | hostPrefix: 24 143 | machineCIDR: 192.168.10.0/24 144 | serviceNetwork: 145 | - 172.8.0.0/16 146 | platform: 147 | vsphere: 148 | apiVIP: 192.168.10.11 149 | cluster: cluster 150 | datacenter: dc 151 | defaultDatastore: vsanDatastore 152 | ingressVIP: 192.168.10.12 153 | network: openshift-segment 154 | password: pass 155 | username: user 156 | vCenter: my-vc.local 157 | publish: External 158 | pullSecret: 'xxx' 159 | sshKey: 'ssh-rsa xxx' 160 | ``` 161 | 162 | You can validate your DNS configuration 163 | before installing OpenShift Container Platform on IPI. A sample DNS zone database 164 | as follow: 165 | 166 | ``` 167 | $TTL 604800 168 | 169 | $ORIGIN openshift.test. 170 | @ IN SOA dns1.openshift.test. root.openshift.test. ( 171 | 2 ; Serial 172 | 604800 ; Refresh 173 | 86400 ; Retry 174 | 2419200 ; Expire 175 | 604800 ) ; Negative Cache TTL 176 | ; main domain name servers 177 | @ IN NS localhost. 178 | @ IN A 127.0.0.1 179 | @ IN AAAA ::1 180 | IN NS dns1.openshift.test. 181 | 182 | ; recors for name servers above 183 | dns1 IN A 10.92.204.129 184 | 185 | ; sub-domain definitions 186 | $ORIGIN ipi.openshift.test. 187 | api IN A 192.168.10.11 188 | apps IN A 192.168.10.12 189 | 190 | ; sub-domain definitions 191 | $ORIGIN apps.ipi.openshift.test. 192 | * IN A 192.168.10.12 193 | ``` 194 | 195 | 2. 
Preparing manifest files: 196 | 197 | Put operator yaml files from `deploy/openshift4/` to `$MY_CLUSTER/manifests`, 198 | edit configmap.yaml about operator configurations, add the operator image and 199 | NCP image in operator.yaml. 200 | 201 | 3. Creating cluster 202 | ``` 203 | $ openshift-install create cluster --dir=$MY_CLUSTER 204 | ``` 205 | 206 | The installation log locates in $MY_CLUSTER/.openshift_install.log. 207 | If the deployment ends in timeout or failure, you can check the environment 208 | according to the log, then Re-run Installer to continue to get the installation 209 | log: 210 | 211 | ``` 212 | $ openshift-install wait-for install-complete 213 | ``` 214 | 215 | ### Upgrade 216 | 217 | For upgrading, all yaml files in `deploy/${platform}/` should be involved, 218 | especially to check the `image` and `NCP_IMAGE` in `deploy/${platform}/operator.yaml 219 | 220 | 221 | ## Documentation 222 | 223 | ### Cluster network config (Openshift specific) 224 | Cluster network config is initially set in install-config.yaml, user could apply 225 | `network.config.openshift.io` CRD to update `clusterNetwork` in `manifests/cluster-network-02-config.yml`. 226 | *Example configurations* 227 | ``` 228 | apiVersion: config.openshift.io/v1 229 | kind: Network 230 | metadata: 231 | name: cluster 232 | spec: 233 | clusterNetwork: 234 | - cidr: 10.10.0.0/14 235 | networkType: ncp 236 | ``` 237 | 238 | ### Operator ConfigMap 239 | 240 | Operator ConfigMap `nsx-ncp-operator-config` is used to provide NCP configurations. 241 | As for now we only support NSX Policy API, single Tier topology on Openshift 4, 242 | single or two Tiers topology on native Kubernetes. 243 | 244 | #### Kubernetes 245 | 246 | Some fields are mandatory including `cluster`, `nsx_api_managers`, 247 | `container_ip_blocks`, `tier0_gateway`(for single T1 case), `top_tier_router` 248 | (for single T0 case), `external_ip_pools`(for SNAT mode).. 
If any of above 249 | options is not provided in the operator ConfigMap, the operator will fail to 250 | reconcile configurations, error message swill be added in ncpinstall nsx-ncp 251 | Degraded conditions 252 | 253 | #### OpenShift 254 | 255 | The operator sets `policy_nsxapi` as True, `single_tier_topology` as True. 256 | In the ConfigMap, some fields are mandatory including `cluster`, `nsx_api_managers`, 257 | `tier0_gateway`(for single T1 case), `top_tier_router`(for single T0 case), 258 | `external_ip_pools`(for SNAT mode). If any of above options is not provided in the 259 | operator ConfigMap, the operator will fail to reconcile configurations, error messages 260 | will be added in clusteroperator nsx-ncp Degraded conditions. 261 | 262 | ### NCP Image 263 | User needs to set NCP image as an environment parameter `NCP_IMAGE` in `deploy/${platform}/operator.yaml`. 264 | 265 | ### Unsafe changes 266 | * (Openshift specific) If CIDRs in `clusterNetwork` are already applied, it is 267 | unsafe to remove them. NSX NCP operator won't fail when it detects some existing 268 | network CIDRs are deleted, but the removal may cause unexpected issues. 269 | * NSX NCP operator uses tags to mark the container host logical ports, deleting these tags 270 | from NSX manager will cause network realization failure on corresponding nodes. 271 | 272 | ## Contributing 273 | 274 | We welcome community contributions to the NSX Container plugin Operator! 275 | 276 | Before you start working with nsx-container-plugin-operator, you should sign our 277 | contributor license agreement (CLA). 278 | 279 | If you wish to contribute code and you have not signed our CLA, our bot will update 280 | the issue when you open a Pull Request. 281 | For more detailed information, refer to [CONTRIBUTING.md](CONTRIBUTING.md). 282 | 283 | For any questions about the CLA process, please refer to our [FAQ](https://cla.vmware.com/faq). 
284 | 285 | ## License 286 | 287 | This repository is available under the [Apache 2.0 license](LICENSE). 288 | -------------------------------------------------------------------------------- /VERSION: -------------------------------------------------------------------------------- 1 | 9.0.0 2 | -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.21.5 as golang-build 2 | FROM registry.access.redhat.com/ubi8/ubi:latest 3 | 4 | LABEL name="nsx-container-plugin-operator" 5 | LABEL maintainer="NSX Containers Team " 6 | LABEL summary="A cluster operator to deploy nsx-ncp CNI plugin" 7 | LABEL version="0.0.3" 8 | LABEL release="1" 9 | LABEL description="Manage deployments, daemonsets, and config maps for NSX Integration" 10 | LABEL vendor="VMware" 11 | 12 | COPY LICENSE /licenses/ 13 | 14 | ENV OPERATOR=/usr/local/bin/nsx-ncp-operator \ 15 | USER_UID=1001 \ 16 | USER_NAME=nsx-ncp-operator 17 | 18 | COPY build/bin /usr/local/bin 19 | COPY manifest /manifest 20 | RUN /usr/local/bin/user_setup 21 | 22 | ENTRYPOINT ["/usr/local/bin/entrypoint"] 23 | 24 | USER ${USER_UID} 25 | -------------------------------------------------------------------------------- /build/bin/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | exec ${OPERATOR} $@ 4 | -------------------------------------------------------------------------------- /build/bin/user_setup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -x 3 | 4 | # ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) 5 | echo "${USER_NAME}:x:${USER_UID}:0:${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd 6 | mkdir -p "${HOME}" 7 | chown "${USER_UID}:0" "${HOME}" 8 | chmod ug+rwx "${HOME}" 9 | 10 | # no need for this script to 
remain in the image after running 11 | rm "$0" 12 | -------------------------------------------------------------------------------- /bundle/generate_bundle.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | while [[ $# -gt 0 ]] 5 | do 6 | key="$1" 7 | case $key in 8 | --bundle-repo) 9 | BUNDLE_REPO="$2" 10 | shift 2 11 | ;; 12 | --bundle-image) 13 | BUNDLE_IMG_NAME="$2" 14 | shift 2 15 | ;; 16 | --bundle-version) 17 | BUNDLE_VERSION="$2" 18 | shift 2 19 | ;; 20 | *) # unknown option 21 | echo "Unknown option $1." 22 | exit 1 23 | ;; 24 | esac 25 | done 26 | 27 | if [ -z "${BUNDLE_REPO}" ]; then 28 | echo "Bundle repo must be specified." 29 | exit 1 30 | fi 31 | 32 | if [ -z "${BUNDLE_IMG_NAME}" ]; then 33 | echo "Bundle image must be specified." 34 | exit 1 35 | fi 36 | 37 | if [ -z "${BUNDLE_VERSION}" ]; then 38 | echo "Bundle version must be specified." 39 | exit 1 40 | fi 41 | 42 | # copy kubernetes manifests and update csv with faq 43 | pushd ./deploy/kubernetes 44 | cp configmap.yaml ../../bundle/kubernetes/manifests 45 | cp lb-secret.yaml ../../bundle/kubernetes/manifests 46 | cp nsx-secret.yaml ../../bundle/kubernetes/manifests 47 | cp operator.nsx.vmware.com_ncpinstalls_crd.yaml ../../bundle/kubernetes/manifests 48 | faq -f yaml -o yaml --slurp \ 49 | '.[0].spec.install = {strategy: "deployment", spec:{ deployments: [{name: .[1].metadata.name, template: .[1].spec }], permissions: [{serviceAccountName: .[3].metadata.name, rules: .[2].rules }]}} | .[0]' \ 50 | ../../bundle/kubernetes/manifests/nsx-container-plugin-operator.clusterserviceversion.yaml operator.yaml role.yaml service_account.yaml > csv.tmp.yaml && cat csv.tmp.yaml > \ 51 | ../../bundle/kubernetes/manifests/nsx-container-plugin-operator.clusterserviceversion.yaml && rm csv.tmp.yaml 52 | popd 53 | 54 | # copy openshift4 manifests and update csv with faq 55 | pushd ./deploy/openshift4 56 | cp configmap.yaml 
../../bundle/openshift4/manifests 57 | cp lb-secret.yaml ../../bundle/openshift4/manifests 58 | cp nsx-secret.yaml ../../bundle/openshift4/manifests 59 | cp operator.nsx.vmware.com_ncpinstalls_crd.yaml ../../bundle/openshift4/manifests 60 | faq -f yaml -o yaml --slurp \ 61 | '.[0].spec.install = {strategy: "deployment", spec:{ deployments: [{name: .[1].metadata.name, template: .[1].spec }], permissions: [{serviceAccountName: .[3].metadata.name, rules: .[2].rules }]}} | .[0]' \ 62 | ../../bundle/openshift4/manifests/nsx-container-plugin-operator.clusterserviceversion.yaml operator.yaml role.yaml service_account.yaml > csv.tmp.yaml && cat csv.tmp.yaml > \ 63 | ../../bundle/openshift4/manifests/nsx-container-plugin-operator.clusterserviceversion.yaml && rm csv.tmp.yaml 64 | popd 65 | 66 | pushd ./bundle 67 | # build kubernetes bundle image 68 | docker build -t ${BUNDLE_REPO}/${BUNDLE_IMG_NAME}-k8s:${BUNDLE_VERSION} -f ./kubernetes/bundle.Dockerfile ./kubernetes 69 | docker push ${BUNDLE_REPO}/${BUNDLE_IMG_NAME}-k8s:${BUNDLE_VERSION} 70 | opm index add --build-tool docker --bundles ${BUNDLE_REPO}/${BUNDLE_IMG_NAME}-k8s:${BUNDLE_VERSION} --tag ${BUNDLE_REPO}/ncp-operator-index-k8s:${BUNDLE_VERSION} 71 | 72 | # build openshift4 bundle image 73 | docker build -t ${BUNDLE_REPO}/${BUNDLE_IMG_NAME}-oc:${BUNDLE_VERSION} -f ./openshift4/bundle.Dockerfile ./openshift4 74 | docker push ${BUNDLE_REPO}/${BUNDLE_IMG_NAME}-oc:${BUNDLE_VERSION} 75 | opm index add --build-tool docker --bundles ${BUNDLE_REPO}/${BUNDLE_IMG_NAME}-oc:${BUNDLE_VERSION} --tag ${BUNDLE_REPO}/ncp-operator-index-oc:${BUNDLE_VERSION} 76 | popd -------------------------------------------------------------------------------- /bundle/kubernetes/bundle.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 4 | LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ 5 | 
LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ 6 | LABEL operators.operatorframework.io.bundle.package.v1=nsx-container-plugin-operator 7 | LABEL operators.operatorframework.io.bundle.channels.v1=alpha 8 | LABEL operators.operatorframework.io.bundle.channel.default.v1=alpha 9 | LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ 10 | LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 11 | 12 | COPY manifests /manifests/ 13 | COPY metadata /metadata/ 14 | COPY tests/scorecard /tests/scorecard/ 15 | LABEL com.redhat.openshift.versions=v4.6,v4.7,v4.8 16 | LABEL com.redhat.delivery.operator.bundle=true 17 | -------------------------------------------------------------------------------- /bundle/kubernetes/manifests/lb-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: ""} 3 | kind: Secret 4 | metadata: {name: lb-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- /bundle/kubernetes/manifests/nsx-container-plugin-operator.clusterserviceversion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: ClusterServiceVersion 3 | metadata: 4 | annotations: 5 | alm-examples: "[\n {\n \"apiVersion\": \"operator.nsx.vmware.com/v1\",\n \"kind\": 6 | \"NcpInstall\",\n \"metadata\": {\n \"name\": \"ncp-install\"\n },\n 7 | \ \"spec\": {\n \"ncpReplicas\": 1\n }\n } \n]" 8 | capabilities: Seamless Upgrades 9 | categories: Networking, Security 10 | certified: "True" 11 | containerImage: vmware/nsx-container-plugin-operator 12 | description: An operator which provides NSX as default network for an Openshift 13 | cluster. Simplifies the process of installing and upgrading the NSX Container 14 | plugin (NCP) components running in an Openshift cluster. 
The operator also allows 15 | for detailed monitoring of NCP components and reacts to configuration changes. 16 | This is a cluster operator and must be installed upon cluster creation; for 17 | more information please refer to install-time instructions. 18 | marketplace.openshift.io/action-text: install-time Instructions 19 | marketplace.openshift.io/remote-workflow: https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/ncp-openshift/GUID-1D75FE92-051C-4E30-8903-AF832E854AA7.html 20 | repository: https://github.com/vmware/nsx-container-plugin/operator 21 | support: VMware 22 | name: nsx-container-plugin-operator 23 | spec: 24 | apiservicedefinitions: {} 25 | customresourcedefinitions: 26 | owned: 27 | - description: NcpInstall is the Schema for the ncpinstalls API 28 | displayName: NcpInstall 29 | kind: NcpInstall 30 | name: ncpinstalls.operator.nsx.vmware.com 31 | resources: 32 | - kind: Deployment 33 | name: A Kubernetes Deployment for the Operator 34 | version: v1 35 | - kind: Configmap 36 | name: A Configmap with parameter for setting up Operands 37 | version: v1 38 | - kind: NcpInstall 39 | name: this operator's CR 40 | version: v1 41 | - kind: ClusterOperator 42 | name: nsx-ncp cluster operator 43 | version: v1 44 | - kind: Network 45 | name: Openshift's cluster network 46 | version: v1 47 | - kind: Daemonset 48 | name: A Daemonset 49 | version: v1 50 | - kind: Pod 51 | name: A Pod 52 | version: v1 53 | - kind: Secret 54 | name: It's a secret, I can't tell you 55 | version: v1 56 | - kind: Node 57 | name: A Kubernetes Node 58 | version: v1 59 | - kind: Status 60 | name: A Kubernetes resource status 61 | version: v1 62 | specdescriptors: 63 | - description: the replica numbers for the nsx-ncp deployment 64 | path: ncpReplicas 65 | statusdescriptors: 66 | - description: standard conditions field for Kubernetes resources 67 | path: conditions 68 | version: v1 69 | description: An operator which provides NSX as default network for an Openshit cluster. 
70 | Simplifies the process of installing and upgrading the NSX Container plugin (NCP) 71 | components running in the Openshift cluster. The operator also allows for detailed 72 | monitoring of NCP components and reacts to configuration changes. 73 | displayName: NSX Container Plugin Operator 74 | install: 75 | spec: 76 | deployments: 77 | - name: nsx-ncp-operator 78 | template: 79 | replicas: 1 80 | selector: 81 | matchLabels: 82 | name: nsx-ncp-operator 83 | template: 84 | metadata: 85 | labels: 86 | name: nsx-ncp-operator 87 | spec: 88 | containers: 89 | - command: 90 | - /bin/bash 91 | - -c 92 | - nsx-ncp-operator --zap-time-encoding=iso8601 --metrics-server-bind-address=:8181 93 | env: 94 | - name: POD_NAME 95 | valueFrom: 96 | fieldRef: 97 | fieldPath: metadata.name 98 | - name: OPERATOR_NAME 99 | value: nsx-ncp-operator 100 | - name: NCP_IMAGE 101 | value: nsx-ncp:latest 102 | - name: WATCH_NAMESPACE 103 | value: nsx-system-operator 104 | image: docker.io/vmware/nsx-container-plugin-operator 105 | imagePullPolicy: IfNotPresent 106 | name: nsx-ncp-operator 107 | volumeMounts: 108 | - mountPath: /host/etc/os-release 109 | name: host-os-release 110 | hostNetwork: true 111 | serviceAccountName: nsx-ncp-operator 112 | tolerations: 113 | - effect: NoSchedule 114 | key: node-role.kubernetes.io/master 115 | - effect: NoSchedule 116 | key: node.kubernetes.io/not-ready 117 | - effect: NoSchedule 118 | key: node.kubernetes.io/network-unavailable 119 | volumes: 120 | - hostPath: 121 | path: /etc/os-release 122 | name: host-os-release 123 | permissions: 124 | - rules: 125 | - apiGroups: 126 | - "" 127 | resources: 128 | - pods 129 | - pods/log 130 | - pods/exec 131 | - configmaps 132 | - namespaces 133 | - serviceaccounts 134 | - secrets 135 | - nodes/status 136 | verbs: 137 | - create 138 | - get 139 | - list 140 | - patch 141 | - delete 142 | - update 143 | - watch 144 | - deletecollection 145 | - apiGroups: 146 | - apps 147 | resources: 148 | - deployments 149 | - 
daemonsets 150 | verbs: 151 | - create 152 | - get 153 | - list 154 | - patch 155 | - delete 156 | - update 157 | - watch 158 | - apiGroups: 159 | - rbac.authorization.k8s.io 160 | resources: 161 | - clusterroles 162 | - clusterrolebindings 163 | - roles 164 | - rolebindings 165 | verbs: 166 | - create 167 | - get 168 | - list 169 | - patch 170 | - update 171 | - watch 172 | - delete 173 | - apiGroups: 174 | - operator.nsx.vmware.com 175 | resources: 176 | - ncpinstalls 177 | - ncpinstalls/status 178 | verbs: 179 | - get 180 | - list 181 | - watch 182 | - patch 183 | - update 184 | - apiGroups: 185 | - "" 186 | resources: 187 | - endpoints 188 | - services 189 | verbs: 190 | - get 191 | - list 192 | - watch 193 | - apiGroups: 194 | - "" 195 | - extensions 196 | - networking.k8s.io 197 | - apps 198 | resources: 199 | - namespaces 200 | - ingresses 201 | - services 202 | - pods 203 | - networkpolicies 204 | - routes 205 | - statefulsets 206 | verbs: 207 | - get 208 | - watch 209 | - list 210 | - update 211 | - patch 212 | - apiGroups: 213 | - nsx.vmware.com 214 | resources: 215 | - nsxerrors 216 | - nsxlocks 217 | - ncpconfigs 218 | verbs: 219 | - create 220 | - get 221 | - list 222 | - patch 223 | - delete 224 | - update 225 | - apiGroups: 226 | - "" 227 | - extensions 228 | - networking.k8s.io 229 | resources: 230 | - ingresses/status 231 | - services/status 232 | - routes/status 233 | verbs: 234 | - replace 235 | - update 236 | - patch 237 | - apiGroups: 238 | - k8s.cni.cncf.io 239 | resources: 240 | - network-attachment-definitions 241 | verbs: 242 | - get 243 | - list 244 | - watch 245 | - apiGroups: 246 | - apiextensions.k8s.io 247 | resources: 248 | - customresourcedefinitions 249 | verbs: 250 | - create 251 | - get 252 | - list 253 | - patch 254 | - update 255 | - watch 256 | - delete 257 | - apiGroups: 258 | - "" 259 | - extensions 260 | - networking.k8s.io 261 | - apps 262 | resources: 263 | - deployments 264 | - endpoints 265 | - ingressclasses 266 | - 
nodes 267 | - pods/log 268 | - replicationcontrollers 269 | - secrets 270 | - statefulsets 271 | verbs: 272 | - get 273 | - list 274 | - watch 275 | - apiGroups: 276 | - vmware.com 277 | resources: 278 | - loadbalancers 279 | - loadbalancers/status 280 | - nsxlbmonitors 281 | - nsxlbmonitors/status 282 | - virtualnetworkinterfaces 283 | - virtualnetworkinterfaces/status 284 | - virtualnetworks 285 | - virtualnetworks/status 286 | verbs: 287 | - create 288 | - get 289 | - list 290 | - patch 291 | - update 292 | - watch 293 | - delete 294 | - apiGroups: 295 | - policy 296 | resources: 297 | - podsecuritypolicies 298 | verbs: 299 | - create 300 | - get 301 | - list 302 | - patch 303 | - update 304 | - watch 305 | - delete 306 | - use 307 | - apiGroups: 308 | - "" 309 | resources: 310 | - events 311 | verbs: 312 | - create 313 | - get 314 | - list 315 | - patch 316 | - update 317 | - delete 318 | serviceAccountName: nsx-ncp-operator 319 | strategy: deployment 320 | installModes: 321 | - supported: true 322 | type: OwnNamespace 323 | - supported: true 324 | type: SingleNamespace 325 | - supported: false 326 | type: MultiNamespace 327 | - supported: false 328 | type: AllNamespaces 329 | keywords: 330 | - networking 331 | - security 332 | maintainers: 333 | - email: sorlando@vmware.com 334 | name: Salvatore Orlando 335 | - email: jsui@vmware.com 336 | name: Jianwei Sui 337 | maturity: alpha 338 | provider: 339 | name: VMware 340 | version: "" 341 | -------------------------------------------------------------------------------- /bundle/kubernetes/manifests/nsx-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: "", tls.ca: ""} 3 | kind: Secret 4 | metadata: {name: nsx-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- 
/bundle/kubernetes/manifests/operator.nsx.vmware.com_ncpinstalls_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: ncpinstalls.operator.nsx.vmware.com 5 | spec: 6 | group: operator.nsx.vmware.com 7 | names: 8 | kind: NcpInstall 9 | listKind: NcpInstallList 10 | plural: ncpinstalls 11 | singular: ncpinstall 12 | scope: Namespaced 13 | versions: 14 | - name: v1 15 | served: true 16 | storage: true 17 | subresources: 18 | status: {} 19 | schema: 20 | openAPIV3Schema: 21 | type: object 22 | description: NcpInstall is the Schema for the ncpinstalls API 23 | properties: 24 | apiVersion: 25 | description: 'APIVersion defines the versioned schema of this representation 26 | of an object. Servers should convert recognized schemas to the latest 27 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 28 | type: string 29 | kind: 30 | description: 'Kind is a string value representing the REST resource this 31 | object represents. Servers may infer this from the endpoint the client 32 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 33 | type: string 34 | metadata: 35 | type: object 36 | spec: 37 | description: NcpInstallSpec defines the desired state of NcpInstall 38 | type: object 39 | properties: 40 | ncpReplicas: 41 | description: the replica numbers of nsx-ncp deployment 42 | type: integer 43 | format: int32 44 | minimum: 0 45 | addNodeTag: 46 | description: 'Tag node logical switch port with node name and cluster when set to true, skip tagging when set to false. 47 | Note that if one node has multiple attached VirtualNetworkInterfaces, this function is not supported and should be set to false.' 
48 | type: boolean 49 | nsx-ncp: 50 | description: nsx-ncp defines what properties users can configure for NCP Deployment 51 | type: object 52 | properties: 53 | nodeSelector: 54 | additionalProperties: 55 | type: string 56 | type: object 57 | tolerations: 58 | items: 59 | properties: 60 | effect: 61 | type: string 62 | key: 63 | type: string 64 | operator: 65 | type: string 66 | tolerationSeconds: 67 | format: int64 68 | type: integer 69 | value: 70 | type: string 71 | type: object 72 | type: array 73 | nsx-node-agent: 74 | description: nsx-node-agent defines what properties users can configure for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 75 | type: object 76 | properties: 77 | tolerations: 78 | items: 79 | properties: 80 | effect: 81 | type: string 82 | key: 83 | type: string 84 | operator: 85 | type: string 86 | tolerationSeconds: 87 | format: int64 88 | type: integer 89 | value: 90 | type: string 91 | type: object 92 | type: array 93 | status: 94 | description: NcpInstallStatus defines the observed state of NcpInstall 95 | type: object 96 | properties: 97 | conditions: 98 | description: conditions is a list of conditions and their status 99 | type: array 100 | items: 101 | description: It is just the standard condition fields 102 | type: object 103 | properties: 104 | lastTransitionTime: 105 | description: Last time the condition transit from one status to another 106 | type: string 107 | format: date-time 108 | type: 109 | description: Type of condition 110 | type: string 111 | status: 112 | description: Status of condition, one of 'True', 'False', 'Unknown' 113 | type: string 114 | reason: 115 | description: Brief reason for the condition 116 | type: string 117 | message: 118 | description: Human readable message indicating details 119 | type: string 120 | 121 | -------------------------------------------------------------------------------- /bundle/kubernetes/metadata/annotations.yaml: 
-------------------------------------------------------------------------------- 1 | annotations: 2 | operators.operatorframework.io.bundle.channel.default.v1: alpha 3 | operators.operatorframework.io.bundle.channels.v1: alpha 4 | operators.operatorframework.io.bundle.manifests.v1: manifests/ 5 | operators.operatorframework.io.bundle.mediatype.v1: registry+v1 6 | operators.operatorframework.io.bundle.metadata.v1: metadata/ 7 | operators.operatorframework.io.bundle.package.v1: nsx-container-plugin-operator 8 | operators.operatorframework.io.test.config.v1: tests/scorecard/ 9 | operators.operatorframework.io.test.mediatype.v1: scorecard+v1 10 | com.redhat.openshift.versions: v4.6,v4.7,4.8 11 | com.redhat.delivery.operator.bundle: true 12 | -------------------------------------------------------------------------------- /bundle/kubernetes/tests/scorecard/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: 8 | - entrypoint: 9 | - scorecard-test 10 | - basic-check-spec 11 | image: quay.io/operator-framework/scorecard-test:v1.2.0 12 | labels: 13 | suite: basic 14 | test: basic-check-spec-test 15 | - entrypoint: 16 | - scorecard-test 17 | - olm-bundle-validation 18 | image: quay.io/operator-framework/scorecard-test:v1.2.0 19 | labels: 20 | suite: olm 21 | test: olm-bundle-validation-test 22 | - entrypoint: 23 | - scorecard-test 24 | - olm-crds-have-validation 25 | image: quay.io/operator-framework/scorecard-test:v1.2.0 26 | labels: 27 | suite: olm 28 | test: olm-crds-have-validation-test 29 | - entrypoint: 30 | - scorecard-test 31 | - olm-crds-have-resources 32 | image: quay.io/operator-framework/scorecard-test:v1.2.0 33 | labels: 34 | suite: olm 35 | test: olm-crds-have-resources-test 36 | - entrypoint: 37 | - scorecard-test 38 | - olm-spec-descriptors 39 | image: 
quay.io/operator-framework/scorecard-test:v1.2.0 40 | labels: 41 | suite: olm 42 | test: olm-spec-descriptors-test 43 | - entrypoint: 44 | - scorecard-test 45 | - olm-status-descriptors 46 | image: quay.io/operator-framework/scorecard-test:v1.2.0 47 | labels: 48 | suite: olm 49 | test: olm-status-descriptors-test 50 | -------------------------------------------------------------------------------- /bundle/openshift4/bundle.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM scratch 2 | 3 | LABEL operators.operatorframework.io.bundle.mediatype.v1=registry+v1 4 | LABEL operators.operatorframework.io.bundle.manifests.v1=manifests/ 5 | LABEL operators.operatorframework.io.bundle.metadata.v1=metadata/ 6 | LABEL operators.operatorframework.io.bundle.package.v1=nsx-container-plugin-operator 7 | LABEL operators.operatorframework.io.bundle.channels.v1=alpha 8 | LABEL operators.operatorframework.io.bundle.channel.default.v1=alpha 9 | LABEL operators.operatorframework.io.test.config.v1=tests/scorecard/ 10 | LABEL operators.operatorframework.io.test.mediatype.v1=scorecard+v1 11 | 12 | COPY manifests /manifests/ 13 | COPY metadata /metadata/ 14 | COPY tests/scorecard /tests/scorecard/ 15 | LABEL com.redhat.openshift.versions=v4.6,v4.7,v4.8 16 | LABEL com.redhat.delivery.operator.bundle=true 17 | -------------------------------------------------------------------------------- /bundle/openshift4/manifests/lb-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: ""} 3 | kind: Secret 4 | metadata: {name: lb-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- /bundle/openshift4/manifests/nsx-container-plugin-operator.clusterserviceversion.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
operators.coreos.com/v1alpha1 2 | kind: ClusterServiceVersion 3 | metadata: 4 | annotations: 5 | alm-examples: "[\n {\n \"apiVersion\": \"operator.nsx.vmware.com/v1\",\n \"kind\": 6 | \"NcpInstall\",\n \"metadata\": {\n \"name\": \"ncp-install\"\n },\n 7 | \ \"spec\": {\n \"ncpReplicas\": 1\n }\n } \n]" 8 | capabilities: Seamless Upgrades 9 | categories: Networking, Security 10 | certified: "True" 11 | containerImage: vmware/nsx-container-plugin-operator 12 | description: An operator which provides NSX as default network for an Openshift 13 | cluster. Simplifies the process of installing and upgrading the NSX Container 14 | plugin (NCP) components running in an Openshift cluster. The operator also allows 15 | for detailed monitoring of NCP components and reacts to configuration changes. 16 | This is a cluster operator and must be installed upon cluster creation; for 17 | more information please refer to install-time instructions. 18 | marketplace.openshift.io/action-text: install-time Instructions 19 | marketplace.openshift.io/remote-workflow: https://docs.vmware.com/en/VMware-NSX-T-Data-Center/3.1/ncp-openshift/GUID-1D75FE92-051C-4E30-8903-AF832E854AA7.html 20 | repository: https://github.com/vmware/nsx-container-plugin/operator 21 | support: VMware 22 | name: nsx-container-plugin-operator 23 | spec: 24 | apiservicedefinitions: {} 25 | customresourcedefinitions: 26 | owned: 27 | - description: NcpInstall is the Schema for the ncpinstalls API 28 | displayName: NcpInstall 29 | kind: NcpInstall 30 | name: ncpinstalls.operator.nsx.vmware.com 31 | resources: 32 | - kind: Deployment 33 | name: A Kubernetes Deployment for the Operator 34 | version: v1 35 | - kind: Configmap 36 | name: A Configmap with parameter for setting up Operands 37 | version: v1 38 | - kind: NcpInstall 39 | name: this operator's CR 40 | version: v1 41 | - kind: ClusterOperator 42 | name: nsx-ncp cluster operator 43 | version: v1 44 | - kind: Network 45 | name: Openshift's cluster network 46 | 
version: v1 47 | - kind: Daemonset 48 | name: A Daemonset 49 | version: v1 50 | - kind: Pod 51 | name: A Pod 52 | version: v1 53 | - kind: Secret 54 | name: It's a secret, I can't tell you 55 | version: v1 56 | - kind: Node 57 | name: A Kubernetes Node 58 | version: v1 59 | - kind: Status 60 | name: A Kubernetes resource status 61 | version: v1 62 | specdescriptors: 63 | - description: the replica numbers for the nsx-ncp deployment 64 | path: ncpReplicas 65 | statusdescriptors: 66 | - description: standard conditions field for Kubernetes resources 67 | path: conditions 68 | version: v1 69 | description: An operator which provides NSX as default network for an Openshit cluster. 70 | Simplifies the process of installing and upgrading the NSX Container plugin (NCP) 71 | components running in the Openshift cluster. The operator also allows for detailed 72 | monitoring of NCP components and reacts to configuration changes. 73 | displayName: NSX Container Plugin Operator 74 | install: 75 | spec: 76 | deployments: 77 | - name: nsx-ncp-operator 78 | template: 79 | replicas: 1 80 | selector: 81 | matchLabels: 82 | name: nsx-ncp-operator 83 | template: 84 | metadata: 85 | labels: 86 | name: nsx-ncp-operator 87 | spec: 88 | containers: 89 | - command: 90 | - /bin/bash 91 | - -c 92 | - nsx-ncp-operator --zap-time-encoding=iso8601 --metrics-server-bind-address=:8181 93 | env: 94 | - name: POD_NAME 95 | valueFrom: 96 | fieldRef: 97 | fieldPath: metadata.name 98 | - name: OPERATOR_NAME 99 | value: nsx-ncp-operator 100 | - name: NCP_IMAGE 101 | value: nsx-ncp:latest 102 | - name: WATCH_NAMESPACE 103 | value: nsx-system-operator 104 | image: docker.io/vmware/nsx-container-plugin-operator 105 | imagePullPolicy: IfNotPresent 106 | name: nsx-ncp-operator 107 | volumeMounts: 108 | - mountPath: /host/etc/os-release 109 | name: host-os-release 110 | hostNetwork: true 111 | serviceAccountName: nsx-ncp-operator 112 | tolerations: 113 | - effect: NoSchedule 114 | key: 
node-role.kubernetes.io/master 115 | - effect: NoSchedule 116 | key: node.kubernetes.io/not-ready 117 | - effect: NoSchedule 118 | key: node.kubernetes.io/network-unavailable 119 | volumes: 120 | - hostPath: 121 | path: /etc/os-release 122 | name: host-os-release 123 | permissions: 124 | - rules: 125 | - apiGroups: 126 | - "" 127 | resources: 128 | - pods 129 | - configmaps 130 | - namespaces 131 | - serviceaccounts 132 | - secrets 133 | - nodes/status 134 | verbs: 135 | - create 136 | - get 137 | - list 138 | - patch 139 | - delete 140 | - update 141 | - watch 142 | - deletecollection 143 | - apiGroups: 144 | - "" 145 | resources: 146 | - nodes 147 | verbs: 148 | - get 149 | - list 150 | - watch 151 | - apiGroups: 152 | - config.openshift.io 153 | resources: 154 | - networks 155 | - networks/finalizers 156 | verbs: 157 | - get 158 | - list 159 | - watch 160 | - patch 161 | - update 162 | - apiGroups: 163 | - apps 164 | resources: 165 | - deployments 166 | - daemonsets 167 | verbs: 168 | - create 169 | - get 170 | - list 171 | - patch 172 | - delete 173 | - update 174 | - watch 175 | - apiGroups: 176 | - config.openshift.io 177 | - apiextensions.k8s.io 178 | resources: 179 | - clusteroperators 180 | - clusteroperators/status 181 | - customresourcedefinitions 182 | verbs: 183 | - create 184 | - get 185 | - list 186 | - patch 187 | - update 188 | - watch 189 | - delete 190 | - apiGroups: 191 | - rbac.authorization.k8s.io 192 | resources: 193 | - clusterroles 194 | - clusterrolebindings 195 | - roles 196 | - rolebindings 197 | verbs: 198 | - create 199 | - get 200 | - list 201 | - patch 202 | - update 203 | - watch 204 | - delete 205 | - apiGroups: 206 | - operator.nsx.vmware.com 207 | resources: 208 | - ncpinstalls 209 | - ncpinstalls/status 210 | verbs: 211 | - get 212 | - list 213 | - watch 214 | - patch 215 | - update 216 | - apiGroups: 217 | - "" 218 | resources: 219 | - endpoints 220 | - services 221 | verbs: 222 | - get 223 | - list 224 | - watch 225 | - 
apiGroups: 226 | - "" 227 | - extensions 228 | - networking.k8s.io 229 | - config.openshift.io 230 | - apps 231 | resources: 232 | - deployments 233 | - endpoints 234 | - pods/log 235 | - nodes 236 | - replicationcontrollers 237 | - secrets 238 | - ingressclasses 239 | - routes 240 | - network 241 | - networks 242 | - statefulsets 243 | verbs: 244 | - get 245 | - watch 246 | - list 247 | - apiGroups: 248 | - route.openshift.io 249 | resources: 250 | - routes 251 | verbs: 252 | - get 253 | - watch 254 | - list 255 | - apiGroups: 256 | - "" 257 | - extensions 258 | - networking.k8s.io 259 | - apps 260 | resources: 261 | - namespaces 262 | - ingresses 263 | - services 264 | - pods 265 | - networkpolicies 266 | - routes 267 | - statefulsets 268 | verbs: 269 | - get 270 | - watch 271 | - list 272 | - update 273 | - patch 274 | - apiGroups: 275 | - nsx.vmware.com 276 | resources: 277 | - nsxerrors 278 | - nsxlocks 279 | - ncpconfigs 280 | verbs: 281 | - create 282 | - get 283 | - list 284 | - patch 285 | - delete 286 | - update 287 | - apiGroups: 288 | - "" 289 | - extensions 290 | - networking.k8s.io 291 | resources: 292 | - ingresses/status 293 | - services/status 294 | - routes/status 295 | verbs: 296 | - replace 297 | - update 298 | - patch 299 | - apiGroups: 300 | - config.openshift.io 301 | resources: 302 | - networks 303 | verbs: 304 | - patch 305 | - apiGroups: 306 | - route.openshift.io 307 | resources: 308 | - routes 309 | - routes/status 310 | verbs: 311 | - patch 312 | - update 313 | - apiGroups: 314 | - k8s.cni.cncf.io 315 | resources: 316 | - network-attachment-definitions 317 | verbs: 318 | - get 319 | - list 320 | - watch 321 | - apiGroups: 322 | - vmware.com 323 | resources: 324 | - loadbalancers 325 | - loadbalancers/status 326 | - nsxlbmonitors 327 | - nsxlbmonitors/status 328 | - virtualnetworkinterfaces 329 | - virtualnetworkinterfaces/status 330 | - virtualnetworks 331 | - virtualnetworks/status 332 | verbs: 333 | - create 334 | - get 335 | - list 
336 | - patch 337 | - update 338 | - watch 339 | - delete 340 | - apiGroups: 341 | - "" 342 | resources: 343 | - pods/exec 344 | verbs: 345 | - get 346 | - create 347 | - apiGroups: 348 | - "" 349 | resources: 350 | - events 351 | verbs: 352 | - create 353 | - get 354 | - list 355 | - patch 356 | - update 357 | - delete 358 | serviceAccountName: nsx-ncp-operator 359 | strategy: deployment 360 | installModes: 361 | - supported: true 362 | type: OwnNamespace 363 | - supported: true 364 | type: SingleNamespace 365 | - supported: false 366 | type: MultiNamespace 367 | - supported: false 368 | type: AllNamespaces 369 | keywords: 370 | - networking 371 | - security 372 | maintainers: 373 | - email: sorlando@vmware.com 374 | name: Salvatore Orlando 375 | - email: jsui@vmware.com 376 | name: Jianwei Sui 377 | maturity: alpha 378 | provider: 379 | name: VMware 380 | version: "" 381 | -------------------------------------------------------------------------------- /bundle/openshift4/manifests/nsx-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: "", tls.ca: ""} 3 | kind: Secret 4 | metadata: {name: nsx-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- /bundle/openshift4/manifests/operator.nsx.vmware.com_ncpinstalls_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: ncpinstalls.operator.nsx.vmware.com 5 | spec: 6 | group: operator.nsx.vmware.com 7 | names: 8 | kind: NcpInstall 9 | listKind: NcpInstallList 10 | plural: ncpinstalls 11 | singular: ncpinstall 12 | scope: Namespaced 13 | versions: 14 | - name: v1 15 | served: true 16 | storage: true 17 | subresources: 18 | status: {} 19 | schema: 20 | openAPIV3Schema: 21 | type: object 22 | 
description: NcpInstall is the Schema for the ncpinstalls API 23 | properties: 24 | apiVersion: 25 | description: 'APIVersion defines the versioned schema of this representation 26 | of an object. Servers should convert recognized schemas to the latest 27 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 28 | type: string 29 | kind: 30 | description: 'Kind is a string value representing the REST resource this 31 | object represents. Servers may infer this from the endpoint the client 32 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 33 | type: string 34 | metadata: 35 | type: object 36 | spec: 37 | description: NcpInstallSpec defines the desired state of NcpInstall 38 | type: object 39 | properties: 40 | ncpReplicas: 41 | description: the replica numbers of nsx-ncp deployment 42 | type: integer 43 | format: int32 44 | minimum: 0 45 | addNodeTag: 46 | description: 'Tag node logical switch port with node name and cluster when set to true, skip tagging when set to false. 47 | Note that if one node has multiple attached VirtualNetworkInterfaces, this function is not supported and should be set to false.' 
48 | type: boolean 49 | nsx-ncp: 50 | description: nsx-ncp defines what properties users can configure for NCP Deployment 51 | type: object 52 | properties: 53 | nodeSelector: 54 | additionalProperties: 55 | type: string 56 | type: object 57 | tolerations: 58 | items: 59 | properties: 60 | effect: 61 | type: string 62 | key: 63 | type: string 64 | operator: 65 | type: string 66 | tolerationSeconds: 67 | format: int64 68 | type: integer 69 | value: 70 | type: string 71 | type: object 72 | type: array 73 | nsx-node-agent: 74 | description: nsx-node-agent defines what properties users can configure for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 75 | type: object 76 | properties: 77 | tolerations: 78 | items: 79 | properties: 80 | effect: 81 | type: string 82 | key: 83 | type: string 84 | operator: 85 | type: string 86 | tolerationSeconds: 87 | format: int64 88 | type: integer 89 | value: 90 | type: string 91 | type: object 92 | type: array 93 | status: 94 | description: NcpInstallStatus defines the observed state of NcpInstall 95 | type: object 96 | properties: 97 | conditions: 98 | description: conditions is a list of conditions and their status 99 | type: array 100 | items: 101 | description: It is just the standard condition fields 102 | type: object 103 | properties: 104 | lastTransitionTime: 105 | description: Last time the condition transit from one status to another 106 | type: string 107 | format: date-time 108 | type: 109 | description: Type of condition 110 | type: string 111 | status: 112 | description: Status of condition, one of 'True', 'False', 'Unknown' 113 | type: string 114 | reason: 115 | description: Brief reason for the condition 116 | type: string 117 | message: 118 | description: Human readable message indicating details 119 | type: string 120 | 121 | -------------------------------------------------------------------------------- /bundle/openshift4/metadata/annotations.yaml: 
-------------------------------------------------------------------------------- 1 | annotations: 2 | operators.operatorframework.io.bundle.channel.default.v1: alpha 3 | operators.operatorframework.io.bundle.channels.v1: alpha 4 | operators.operatorframework.io.bundle.manifests.v1: manifests/ 5 | operators.operatorframework.io.bundle.mediatype.v1: registry+v1 6 | operators.operatorframework.io.bundle.metadata.v1: metadata/ 7 | operators.operatorframework.io.bundle.package.v1: nsx-container-plugin-operator 8 | operators.operatorframework.io.test.config.v1: tests/scorecard/ 9 | operators.operatorframework.io.test.mediatype.v1: scorecard+v1 10 | com.redhat.openshift.versions: v4.6,v4.7,v4.8 11 | com.redhat.delivery.operator.bundle: true -------------------------------------------------------------------------------- /bundle/openshift4/tests/scorecard/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scorecard.operatorframework.io/v1alpha3 2 | kind: Configuration 3 | metadata: 4 | name: config 5 | stages: 6 | - parallel: true 7 | tests: 8 | - entrypoint: 9 | - scorecard-test 10 | - basic-check-spec 11 | image: quay.io/operator-framework/scorecard-test:v1.2.0 12 | labels: 13 | suite: basic 14 | test: basic-check-spec-test 15 | - entrypoint: 16 | - scorecard-test 17 | - olm-bundle-validation 18 | image: quay.io/operator-framework/scorecard-test:v1.2.0 19 | labels: 20 | suite: olm 21 | test: olm-bundle-validation-test 22 | - entrypoint: 23 | - scorecard-test 24 | - olm-crds-have-validation 25 | image: quay.io/operator-framework/scorecard-test:v1.2.0 26 | labels: 27 | suite: olm 28 | test: olm-crds-have-validation-test 29 | - entrypoint: 30 | - scorecard-test 31 | - olm-crds-have-resources 32 | image: quay.io/operator-framework/scorecard-test:v1.2.0 33 | labels: 34 | suite: olm 35 | test: olm-crds-have-resources-test 36 | - entrypoint: 37 | - scorecard-test 38 | - olm-spec-descriptors 39 | image: 
quay.io/operator-framework/scorecard-test:v1.2.0 40 | labels: 41 | suite: olm 42 | test: olm-spec-descriptors-test 43 | - entrypoint: 44 | - scorecard-test 45 | - olm-status-descriptors 46 | image: quay.io/operator-framework/scorecard-test:v1.2.0 47 | labels: 48 | suite: olm 49 | test: olm-status-descriptors-test 50 | -------------------------------------------------------------------------------- /cmd/manager/main.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package main 5 | 6 | import ( 7 | "context" 8 | "flag" 9 | "fmt" 10 | "os" 11 | "runtime" 12 | "strings" 13 | 14 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 15 | _ "k8s.io/client-go/plugin/pkg/client/auth" 16 | 17 | "github.com/vmware/nsx-container-plugin-operator/pkg/apis" 18 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller" 19 | operatortypes "github.com/vmware/nsx-container-plugin-operator/pkg/types" 20 | "github.com/vmware/nsx-container-plugin-operator/version" 21 | 22 | "github.com/operator-framework/operator-sdk/pkg/k8sutil" 23 | "github.com/operator-framework/operator-sdk/pkg/leader" 24 | "github.com/operator-framework/operator-sdk/pkg/log/zap" 25 | sdkVersion "github.com/operator-framework/operator-sdk/version" 26 | "github.com/spf13/pflag" 27 | "sigs.k8s.io/controller-runtime/pkg/client/config" 28 | logf "sigs.k8s.io/controller-runtime/pkg/log" 29 | "sigs.k8s.io/controller-runtime/pkg/manager" 30 | "sigs.k8s.io/controller-runtime/pkg/manager/signals" 31 | ) 32 | 33 | var log = logf.Log.WithName("cmd") 34 | 35 | func printVersion() { // logs operator, Go runtime, and operator-sdk versions at startup for supportability 36 | log.Info(fmt.Sprintf("Operator Version: %s", version.Version)) 37 | log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) 38 | log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) 39 | log.Info(fmt.Sprintf("Version of operator-sdk: %v", 
sdkVersion.Version)) 40 | } 41 | 42 | func main() { 43 | // Add the zap logger flag set to the CLI. The flag set must 44 | // be added before calling pflag.Parse(). 45 | pflag.CommandLine.AddFlagSet(zap.FlagSet()) 46 | 47 | // metricsBindAddr is the TCP address that the controller should bind to 48 | // for serving prometheus metrics. 49 | var metricsBindAddr string 50 | flag.StringVar(&metricsBindAddr, "metrics-server-bind-address", ":8181", "The address the prometheus metrics server binds to.") // :8181 matches the default used by the deployment manifests 51 | 52 | // Add flags registered by imported packages (e.g. glog and 53 | // controller-runtime) 54 | pflag.CommandLine.AddGoFlagSet(flag.CommandLine) 55 | 56 | pflag.Parse() 57 | 58 | // Use a zap logr.Logger implementation. If none of the zap 59 | // flags are configured (or if the zap flag set is not being 60 | // used), this defaults to a production zap logger. 61 | // 62 | // The logger instantiated here can be changed to any logger 63 | // implementing the logr.Logger interface. This logger will 64 | // be propagated through the whole operator, generating 65 | // uniform and structured logs. 66 | logf.SetLogger(zap.Logger()) 67 | 68 | printVersion() 69 | 70 | // Check if we need to watch a specific namespace 71 | namespace, err := k8sutil.GetWatchNamespace() // resolved from the WATCH_NAMESPACE env var set in the operator Deployment 72 | if err != nil { 73 | log.Error(err, "Failed to get watch namespace") 74 | os.Exit(1) 75 | } 76 | 77 | if strings.Contains(namespace, ",") { 78 | namespace = "" 79 | log.Info(`This operator cannot handle multiple 80 | namespaces. 
The operator will watch for changes 81 | across all namespaces`) 82 | } 83 | 84 | // Set default manager options 85 | options := manager.Options{ 86 | Namespace: namespace, 87 | MetricsBindAddress: metricsBindAddr, 88 | } 89 | 90 | if namespace != "" && namespace != operatortypes.NsxNamespace { 91 | // since the pod controller must watch nsx-system namespace, 92 | // this becomes akin to a multinamespace scenario, but we 93 | // will treat it as "AllNamespaces" since the MultiNamespaceCache 94 | // does not load cluster-scoped resources. 95 | // The namespace will be loaded anyway into the statusmanager so 96 | // the configMapController will monitor only that namespace 97 | options.Namespace = "" 98 | } 99 | 100 | // Get a config to talk to the apiserver 101 | cfg, err := config.GetConfig() 102 | if err != nil { 103 | log.Error(err, "") 104 | os.Exit(1) 105 | } 106 | 107 | ctx := context.TODO() 108 | // Become the leader before proceeding 109 | err = leader.Become(ctx, "nsx-ncp-operator-lock") // ensures only one operator instance is active per namespace 110 | if err != nil { 111 | log.Error(err, "") 112 | os.Exit(1) 113 | } 114 | 115 | // Create a new manager to provide shared dependencies and start components 116 | mgr, err := manager.New(cfg, options) 117 | if err != nil { 118 | log.Error(err, "") 119 | os.Exit(1) 120 | } 121 | 122 | log.Info("Registering Components.") 123 | 124 | // Setup Scheme for all resources 125 | if err := apis.AddToScheme(mgr.GetScheme()); err != nil { 126 | log.Error(err, "") 127 | os.Exit(1) 128 | } 129 | 130 | // Setup all Controllers 131 | if err := controller.AddToManager(mgr, namespace); err != nil { // pass the original namespace so controllers can still scope to it even when the cache watches all namespaces (see note above) 132 | log.Error(err, "") 133 | os.Exit(1) 134 | } 135 | 136 | log.Info("Starting the Cmd.") 137 | 138 | // Start the Cmd 139 | if err := mgr.Start(signals.SetupSignalHandler()); err != nil { 140 | log.Error(err, "Manager exited non-zero") 141 | os.Exit(1) 142 | } 143 | } 144 | -------------------------------------------------------------------------------- /deploy/kubernetes/lb-secret.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: ""} 3 | kind: Secret 4 | metadata: {name: lb-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- /deploy/kubernetes/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: nsx-system-operator 5 | -------------------------------------------------------------------------------- /deploy/kubernetes/nsx-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: "", tls.ca: ""} 3 | kind: Secret 4 | metadata: {name: nsx-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- /deploy/kubernetes/operator.nsx.vmware.com_ncpinstalls_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: ncpinstalls.operator.nsx.vmware.com 5 | spec: 6 | group: operator.nsx.vmware.com 7 | names: 8 | kind: NcpInstall 9 | listKind: NcpInstallList 10 | plural: ncpinstalls 11 | singular: ncpinstall 12 | scope: Namespaced 13 | versions: 14 | - name: v1 15 | served: true 16 | storage: true 17 | subresources: 18 | status: {} 19 | schema: 20 | openAPIV3Schema: 21 | type: object 22 | description: NcpInstall is the Schema for the ncpinstalls API 23 | properties: 24 | apiVersion: 25 | description: 'APIVersion defines the versioned schema of this representation 26 | of an object. Servers should convert recognized schemas to the latest 27 | internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 28 | type: string 29 | kind: 30 | description: 'Kind is a string value representing the REST resource this 31 | object represents. Servers may infer this from the endpoint the client 32 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 33 | type: string 34 | metadata: 35 | type: object 36 | spec: 37 | description: NcpInstallSpec defines the desired state of NcpInstall 38 | type: object 39 | properties: 40 | ncpReplicas: 41 | description: the replica numbers of nsx-ncp deployment 42 | type: integer 43 | format: int32 44 | minimum: 0 45 | addNodeTag: 46 | description: 'Tag node logical switch port with node name and cluster when set to true, skip tagging when set to false. 47 | Note that if one node has multiple attached VirtualNetworkInterfaces, this function is not supported and should be set to false.' 
48 | type: boolean 49 | nsx-ncp: 50 | description: nsx-ncp defines what properties users can configure for NCP Deployment 51 | type: object 52 | properties: 53 | nodeSelector: 54 | additionalProperties: 55 | type: string 56 | type: object 57 | tolerations: 58 | items: 59 | properties: 60 | effect: 61 | type: string 62 | key: 63 | type: string 64 | operator: 65 | type: string 66 | tolerationSeconds: 67 | format: int64 68 | type: integer 69 | value: 70 | type: string 71 | type: object 72 | type: array 73 | nsx-node-agent: 74 | description: nsx-node-agent defines what properties users can configure for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 75 | type: object 76 | properties: 77 | tolerations: 78 | items: 79 | properties: 80 | effect: 81 | type: string 82 | key: 83 | type: string 84 | operator: 85 | type: string 86 | tolerationSeconds: 87 | format: int64 88 | type: integer 89 | value: 90 | type: string 91 | type: object 92 | type: array 93 | status: 94 | description: NcpInstallStatus defines the observed state of NcpInstall 95 | type: object 96 | properties: 97 | conditions: 98 | description: conditions is a list of conditions and their status 99 | type: array 100 | items: 101 | description: It is just the standard condition fields 102 | type: object 103 | properties: 104 | lastTransitionTime: 105 | description: Last time the condition transit from one status to another 106 | type: string 107 | format: date-time 108 | type: 109 | description: Type of condition 110 | type: string 111 | status: 112 | description: Status of condition, one of 'True', 'False', 'Unknown' 113 | type: string 114 | reason: 115 | description: Brief reason for the condition 116 | type: string 117 | message: 118 | description: Human readable message indicating details 119 | type: string 120 | 121 | -------------------------------------------------------------------------------- /deploy/kubernetes/operator.nsx.vmware.com_v1_ncpinstall_cr.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: operator.nsx.vmware.com/v1 2 | kind: NcpInstall 3 | metadata: 4 | name: ncp-install 5 | namespace: nsx-system-operator 6 | spec: 7 | ncpReplicas: 1 8 | # Note that if one node has multiple attached VirtualNetworkInterfaces, this function is not supported and should not be set to true 9 | addNodeTag: false 10 | nsx-ncp: 11 | nodeSelector: 12 | # Uncomment below to add user-defined nodeSelector for NCP Deployment 13 | #: 14 | 15 | tolerations: 16 | # Please don't modify below default tolerations for NCP Deployment 17 | - key: node-role.kubernetes.io/master 18 | effect: NoSchedule 19 | - key: node-role.kubernetes.io/control-plane 20 | effect: NoSchedule 21 | - key: node.kubernetes.io/network-unavailable 22 | effect: NoSchedule 23 | # Uncomment below to add user-defined tolerations for NCP Deployment 24 | # 25 | 26 | nsx-node-agent: 27 | tolerations: 28 | # Please don't modify below default tolerations 29 | # for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 30 | - key: node-role.kubernetes.io/master 31 | effect: NoSchedule 32 | - key: node-role.kubernetes.io/control-plane 33 | effect: NoSchedule 34 | - key: node.kubernetes.io/not-ready 35 | effect: NoSchedule 36 | - key: node.kubernetes.io/unreachable 37 | effect: NoSchedule 38 | - operator: Exists 39 | effect: NoExecute 40 | # Uncomment below to add user-defined tolerations for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 41 | # 42 | -------------------------------------------------------------------------------- /deploy/kubernetes/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nsx-ncp-operator 5 | namespace: nsx-system-operator 6 | labels: 7 | tier: nsx-networking 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: nsx-ncp-operator 13 | template: 14 | metadata: 15 | labels: 16 | name: 
nsx-ncp-operator 17 | tier: nsx-networking 18 | spec: 19 | hostNetwork: true 20 | serviceAccountName: nsx-ncp-operator 21 | tolerations: 22 | - effect: NoSchedule 23 | key: node-role.kubernetes.io/master 24 | - effect: NoSchedule 25 | key: node-role.kubernetes.io/control-plane 26 | - effect: NoSchedule 27 | key: node.kubernetes.io/not-ready 28 | - effect: NoSchedule 29 | key: node.kubernetes.io/network-unavailable 30 | volumes: 31 | - hostPath: {path: /etc/os-release} 32 | name: host-os-release 33 | containers: 34 | - name: nsx-ncp-operator 35 | image: docker.io/vmware/nsx-container-plugin-operator 36 | command: ["/bin/bash", "-c", "nsx-ncp-operator --zap-time-encoding=iso8601 --metrics-server-bind-address=:8181"] 37 | volumeMounts: 38 | - {mountPath: /host/etc/os-release, name: host-os-release} 39 | imagePullPolicy: IfNotPresent 40 | env: 41 | - name: POD_NAME 42 | valueFrom: 43 | fieldRef: 44 | fieldPath: metadata.name 45 | - name: OPERATOR_NAME 46 | value: "nsx-ncp-operator" 47 | - name: NCP_IMAGE 48 | value: "nsx-ncp:latest" 49 | - name: WATCH_NAMESPACE 50 | value: "nsx-system-operator" 51 | -------------------------------------------------------------------------------- /deploy/kubernetes/role.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: nsx-ncp-operator 5 | rules: 6 | - apiGroups: [''] 7 | resources: [pods, pods/log, pods/exec, configmaps, namespaces, serviceaccounts, secrets, nodes/status] 8 | verbs: [create, get, list, patch, delete, update, watch, deletecollection] 9 | - apiGroups: [apps] 10 | resources: [deployments, daemonsets] 11 | verbs: [create, get, list, patch, delete, update, watch] 12 | - apiGroups: [rbac.authorization.k8s.io] 13 | resources: [clusterroles, clusterrolebindings, roles, rolebindings] 14 | verbs: [create, get, list, patch, update, watch, delete] 15 | - apiGroups: [operator.nsx.vmware.com] 16 | 
resources: [ncpinstalls, ncpinstalls/status] 17 | verbs: [get, list, watch, patch, update] 18 | # Required by nsx-node-agent 19 | - apiGroups: [''] 20 | resources: [endpoints, services] 21 | verbs: [get, list, watch] 22 | # Required by nsx-ncp 23 | - apiGroups: ['', extensions, networking.k8s.io, apps] 24 | resources: [namespaces, ingresses, services, pods, networkpolicies, routes, statefulsets] 25 | verbs: [get, watch, list, update, patch] 26 | - apiGroups: [nsx.vmware.com] 27 | resources: [nsxerrors, nsxlocks, ncpconfigs] 28 | verbs: [create, get, list, patch, delete, update] 29 | - apiGroups: ['', extensions, networking.k8s.io] 30 | resources: [ingresses/status, services/status, routes/status] 31 | verbs: [replace, update, patch] 32 | - apiGroups: [k8s.cni.cncf.io] 33 | resources: [network-attachment-definitions] 34 | verbs: [get, list, watch] 35 | - apiGroups: [apiextensions.k8s.io] 36 | resources: [customresourcedefinitions] 37 | verbs: [create, get, list, patch, update, watch, delete] 38 | - apiGroups: ['', extensions, networking.k8s.io, apps] 39 | resources: [deployments, endpoints, ingressclasses, nodes, pods/log, replicationcontrollers, secrets, statefulsets] 40 | verbs: [get, list, watch] 41 | - apiGroups: [vmware.com] 42 | resources: [loadbalancers, loadbalancers/status, nsxlbmonitors, nsxlbmonitors/status, virtualnetworkinterfaces, virtualnetworkinterfaces/status, virtualnetworks, virtualnetworks/status] 43 | verbs: [create, get, list, patch, update, watch, delete] 44 | - apiGroups: [policy] 45 | resources: [podsecuritypolicies] 46 | verbs: [create, get, list, patch, update, watch, delete, use] 47 | - apiGroups: [''] 48 | resources: [events] 49 | verbs: [create, get, list, patch, update, delete] 50 | -------------------------------------------------------------------------------- /deploy/kubernetes/role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: 
rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: nsx-ncp-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: nsx-ncp-operator 8 | namespace: nsx-system-operator 9 | roleRef: 10 | kind: ClusterRole 11 | name: nsx-ncp-operator 12 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /deploy/kubernetes/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nsx-ncp-operator 5 | namespace: nsx-system-operator 6 | -------------------------------------------------------------------------------- /deploy/openshift4/lb-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: ""} 3 | kind: Secret 4 | metadata: {name: lb-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- /deploy/openshift4/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: {name: nsx-system-operator, openshift.io/run-level: '0'} 5 | name: nsx-system-operator 6 | -------------------------------------------------------------------------------- /deploy/openshift4/nsx-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: {tls.crt: "", tls.key: "", tls.ca: ""} 3 | kind: Secret 4 | metadata: {name: nsx-secret, namespace: nsx-system-operator} 5 | type: kubernetes.io/tls 6 | -------------------------------------------------------------------------------- /deploy/openshift4/operator.nsx.vmware.com_ncpinstalls_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: 
CustomResourceDefinition 3 | metadata: 4 | name: ncpinstalls.operator.nsx.vmware.com 5 | spec: 6 | group: operator.nsx.vmware.com 7 | names: 8 | kind: NcpInstall 9 | listKind: NcpInstallList 10 | plural: ncpinstalls 11 | singular: ncpinstall 12 | scope: Namespaced 13 | versions: 14 | - name: v1 15 | served: true 16 | storage: true 17 | subresources: 18 | status: {} 19 | schema: 20 | openAPIV3Schema: 21 | type: object 22 | description: NcpInstall is the Schema for the ncpinstalls API 23 | properties: 24 | apiVersion: 25 | description: 'APIVersion defines the versioned schema of this representation 26 | of an object. Servers should convert recognized schemas to the latest 27 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 28 | type: string 29 | kind: 30 | description: 'Kind is a string value representing the REST resource this 31 | object represents. Servers may infer this from the endpoint the client 32 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 33 | type: string 34 | metadata: 35 | type: object 36 | spec: 37 | description: NcpInstallSpec defines the desired state of NcpInstall 38 | type: object 39 | properties: 40 | ncpReplicas: 41 | description: the replica numbers of nsx-ncp deployment 42 | type: integer 43 | format: int32 44 | minimum: 0 45 | addNodeTag: 46 | description: 'Tag node logical switch port with node name and cluster when set to true, skip tagging when set to false. 47 | Note that if one node has multiple attached VirtualNetworkInterfaces, this function is not supported and should be set to false.' 
48 | type: boolean 49 | nsx-ncp: 50 | description: nsx-ncp defines what properties users can configure for NCP Deployment 51 | type: object 52 | properties: 53 | nodeSelector: 54 | additionalProperties: 55 | type: string 56 | type: object 57 | tolerations: 58 | items: 59 | properties: 60 | effect: 61 | type: string 62 | key: 63 | type: string 64 | operator: 65 | type: string 66 | tolerationSeconds: 67 | format: int64 68 | type: integer 69 | value: 70 | type: string 71 | type: object 72 | type: array 73 | nsx-node-agent: 74 | description: nsx-node-agent defines what properties users can configure for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 75 | type: object 76 | properties: 77 | tolerations: 78 | items: 79 | properties: 80 | effect: 81 | type: string 82 | key: 83 | type: string 84 | operator: 85 | type: string 86 | tolerationSeconds: 87 | format: int64 88 | type: integer 89 | value: 90 | type: string 91 | type: object 92 | type: array 93 | status: 94 | description: NcpInstallStatus defines the observed state of NcpInstall 95 | type: object 96 | properties: 97 | conditions: 98 | description: conditions is a list of conditions and their status 99 | type: array 100 | items: 101 | description: It is just the standard condition fields 102 | type: object 103 | properties: 104 | lastTransitionTime: 105 | description: Last time the condition transit from one status to another 106 | type: string 107 | format: date-time 108 | type: 109 | description: Type of condition 110 | type: string 111 | status: 112 | description: Status of condition, one of 'True', 'False', 'Unknown' 113 | type: string 114 | reason: 115 | description: Brief reason for the condition 116 | type: string 117 | message: 118 | description: Human readable message indicating details 119 | type: string 120 | 121 | -------------------------------------------------------------------------------- /deploy/openshift4/operator.nsx.vmware.com_v1_ncpinstall_cr.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: operator.nsx.vmware.com/v1 2 | kind: NcpInstall 3 | metadata: 4 | name: ncp-install 5 | namespace: nsx-system-operator 6 | spec: 7 | ncpReplicas: 1 8 | # Note that if one node has multiple attached VirtualNetworkInterfaces, this function is not supported and should be set to false 9 | addNodeTag: true 10 | nsx-ncp: 11 | nodeSelector: 12 | # Uncomment below to add user-defined nodeSelector for NCP Deployment 13 | #: 14 | 15 | tolerations: 16 | # Please don't modify below default tolerations for NCP Deployment 17 | - key: node-role.kubernetes.io/master 18 | effect: NoSchedule 19 | - key: node-role.kubernetes.io/control-plane 20 | effect: NoSchedule 21 | - key: node.kubernetes.io/network-unavailable 22 | effect: NoSchedule 23 | # Uncomment below to add user-defined tolerations for NCP Deployment 24 | # 25 | 26 | nsx-node-agent: 27 | tolerations: 28 | # Please don't modify below default tolerations 29 | # for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 30 | - key: node-role.kubernetes.io/master 31 | effect: NoSchedule 32 | - key: node-role.kubernetes.io/control-plane 33 | effect: NoSchedule 34 | - key: node.kubernetes.io/not-ready 35 | effect: NoSchedule 36 | - key: node.kubernetes.io/unreachable 37 | effect: NoSchedule 38 | - operator: Exists 39 | effect: NoExecute 40 | # Uncomment below to add user-defined tolerations for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 41 | # 42 | -------------------------------------------------------------------------------- /deploy/openshift4/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nsx-ncp-operator 5 | namespace: nsx-system-operator 6 | labels: 7 | tier: nsx-networking 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | name: nsx-ncp-operator 13 | template: 14 | metadata: 15 | labels: 16 | name: 
nsx-ncp-operator 17 | tier: nsx-networking 18 | spec: 19 | hostNetwork: true 20 | serviceAccountName: nsx-ncp-operator 21 | tolerations: 22 | - effect: NoSchedule 23 | key: node-role.kubernetes.io/master 24 | - effect: NoSchedule 25 | key: node-role.kubernetes.io/control-plane 26 | - effect: NoSchedule 27 | key: node.kubernetes.io/not-ready 28 | - effect: NoSchedule 29 | key: node.kubernetes.io/network-unavailable 30 | volumes: 31 | - hostPath: {path: /etc/os-release} 32 | name: host-os-release 33 | containers: 34 | - name: nsx-ncp-operator 35 | image: docker.io/vmware/nsx-container-plugin-operator 36 | command: ["/bin/bash", "-c", "nsx-ncp-operator --zap-time-encoding=iso8601 --metrics-server-bind-address=:8181"] 37 | volumeMounts: 38 | - {mountPath: /host/etc/os-release, name: host-os-release} 39 | imagePullPolicy: IfNotPresent 40 | env: 41 | - name: POD_NAME 42 | valueFrom: 43 | fieldRef: 44 | fieldPath: metadata.name 45 | - name: OPERATOR_NAME 46 | value: "nsx-ncp-operator" 47 | - name: NCP_IMAGE 48 | value: "nsx-ncp:latest" 49 | - name: WATCH_NAMESPACE 50 | value: "nsx-system-operator" 51 | -------------------------------------------------------------------------------- /deploy/openshift4/role.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: nsx-ncp-operator 5 | rules: 6 | - apiGroups: [''] 7 | resources: [pods, configmaps, namespaces, serviceaccounts, secrets, nodes/status] 8 | verbs: [create, get, list, patch, delete, update, watch, deletecollection] 9 | - apiGroups: [''] 10 | resources: [nodes] 11 | verbs: [get, list, watch] 12 | - apiGroups: [config.openshift.io] 13 | resources: [networks, networks/finalizers] 14 | verbs: [get, list, watch, patch, update] 15 | - apiGroups: [apps] 16 | resources: [deployments, daemonsets] 17 | verbs: [create, get, list, patch, delete, update, watch] 18 | - apiGroups: [config.openshift.io, 
apiextensions.k8s.io] 19 | resources: [clusteroperators, clusteroperators/status, customresourcedefinitions] 20 | verbs: [create, get, list, patch, update, watch, delete] 21 | - apiGroups: [rbac.authorization.k8s.io] 22 | resources: [clusterroles, clusterrolebindings, roles, rolebindings] 23 | verbs: [create, get, list, patch, update, watch, delete] 24 | - apiGroups: [operator.nsx.vmware.com] 25 | resources: [ncpinstalls, ncpinstalls/status] 26 | verbs: [get, list, watch, patch, update] 27 | # Required by nsx-node-agent 28 | - apiGroups: [''] 29 | resources: [endpoints, services] 30 | verbs: [get, list, watch] 31 | # Required by nsx-ncp 32 | - apiGroups: ['', extensions, networking.k8s.io, config.openshift.io, apps] 33 | resources: [deployments, endpoints, pods/log, nodes, replicationcontrollers, secrets, 34 | ingressclasses, routes, network, networks, statefulsets] 35 | verbs: [get, watch, list] 36 | - apiGroups: [route.openshift.io] 37 | resources: [routes] 38 | verbs: [get, watch, list] 39 | - apiGroups: ['', extensions, networking.k8s.io, apps] 40 | resources: [namespaces, ingresses, services, pods, networkpolicies, routes, statefulsets] 41 | verbs: [get, watch, list, update, patch] 42 | - apiGroups: [nsx.vmware.com] 43 | resources: [nsxerrors, nsxlocks, ncpconfigs] 44 | verbs: [create, get, list, patch, delete, update] 45 | - apiGroups: ['', extensions, networking.k8s.io] 46 | resources: [ingresses/status, services/status, routes/status] 47 | verbs: [replace, update, patch] 48 | - apiGroups: [config.openshift.io] 49 | resources: [networks] 50 | verbs: [patch] 51 | - apiGroups: [route.openshift.io] 52 | resources: [routes, routes/status] 53 | verbs: [patch, update] 54 | - apiGroups: [operator.openshift.io] 55 | resources: [ingresscontrollers] 56 | verbs: [list] 57 | - apiGroups: [k8s.cni.cncf.io] 58 | resources: [network-attachment-definitions] 59 | verbs: [get, list, watch] 60 | - apiGroups: [vmware.com] 61 | resources: [loadbalancers, loadbalancers/status, 
nsxlbmonitors, nsxlbmonitors/status, virtualnetworkinterfaces, virtualnetworkinterfaces/status, virtualnetworks, virtualnetworks/status] 62 | verbs: [create, get, list, patch, update, watch, delete] 63 | - apiGroups: [''] 64 | resources: [pods/exec] 65 | verbs: [get, create] 66 | - apiGroups: [''] 67 | resources: [events] 68 | verbs: [create, get, list, patch, update, delete] 69 | -------------------------------------------------------------------------------- /deploy/openshift4/role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: nsx-ncp-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: nsx-ncp-operator 8 | namespace: nsx-system-operator 9 | roleRef: 10 | kind: ClusterRole 11 | name: nsx-ncp-operator 12 | apiGroup: rbac.authorization.k8s.io -------------------------------------------------------------------------------- /deploy/openshift4/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nsx-ncp-operator 5 | namespace: nsx-system-operator 6 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/vmware/nsx-container-plugin-operator 2 | 3 | go 1.23.1 4 | 5 | require ( 6 | github.com/ghodss/yaml v1.0.0 7 | github.com/imdario/mergo v0.3.7 8 | github.com/openshift/api v3.9.1-0.20191111211345-a27ff30ebf09+incompatible 9 | github.com/openshift/cluster-network-operator v0.0.0-20200505233431-0c44782d5245 10 | github.com/openshift/library-go v0.0.0-20200511081854-8db3781f6d14 11 | github.com/operator-framework/operator-sdk v0.17.1-0.20200428043048-cb85478660f0 12 | github.com/pkg/errors v0.9.1 13 | github.com/sirupsen/logrus v1.6.0 14 | github.com/spf13/pflag v1.0.5 
15 | github.com/stretchr/testify v1.4.0 16 | github.com/vmware/go-vmware-nsxt v0.0.0-20201207175959-23201aae9cc3 17 | github.com/vmware/vsphere-automation-sdk-go/runtime v0.7.0 18 | github.com/vmware/vsphere-automation-sdk-go/services/nsxt v0.12.0 19 | gopkg.in/ini.v1 v1.51.0 20 | k8s.io/api v0.18.2 21 | k8s.io/apimachinery v0.18.3 22 | k8s.io/client-go v12.0.0+incompatible 23 | k8s.io/kubectl v0.17.4 24 | sigs.k8s.io/controller-runtime v0.5.2 25 | ) 26 | 27 | require ( 28 | cloud.google.com/go v0.49.0 // indirect 29 | github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503 // indirect 30 | github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 // indirect 31 | github.com/Azure/go-autorest/autorest/date v0.2.0 // indirect 32 | github.com/Azure/go-autorest/logger v0.1.0 // indirect 33 | github.com/Azure/go-autorest/tracing v0.5.0 // indirect 34 | github.com/Masterminds/goutils v1.1.1 // indirect 35 | github.com/Masterminds/semver v1.5.0 // indirect 36 | github.com/Masterminds/sprig v2.22.0+incompatible // indirect 37 | github.com/antihax/optional v1.0.0 // indirect 38 | github.com/beevik/etree v1.1.0 // indirect 39 | github.com/beorn7/perks v1.0.1 // indirect 40 | github.com/cespare/xxhash/v2 v2.1.1 // indirect 41 | github.com/davecgh/go-spew v1.1.1 // indirect 42 | github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect 43 | github.com/evanphx/json-patch v4.9.0+incompatible // indirect 44 | github.com/gibson042/canonicaljson-go v1.0.3 // indirect 45 | github.com/go-logr/logr v0.1.0 // indirect 46 | github.com/go-logr/zapr v0.1.1 // indirect 47 | github.com/gogo/protobuf v1.3.2 // indirect 48 | github.com/golang-jwt/jwt/v4 v4.5.2 // indirect 49 | github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 // indirect 50 | github.com/golang/protobuf v1.5.0 // indirect 51 | github.com/google/go-cmp v0.5.5 // indirect 52 | github.com/google/gofuzz v1.1.0 // indirect 53 | github.com/google/uuid v1.2.0 // indirect 54 | 
github.com/googleapis/gnostic v0.3.1 // indirect 55 | github.com/gophercloud/gophercloud v0.6.0 // indirect 56 | github.com/hashicorp/golang-lru v0.5.3 // indirect 57 | github.com/huandu/xstrings v1.2.0 // indirect 58 | github.com/json-iterator/go v1.1.11 // indirect 59 | github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect 60 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 61 | github.com/mitchellh/copystructure v1.0.0 // indirect 62 | github.com/mitchellh/reflectwalk v1.0.0 // indirect 63 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 64 | github.com/modern-go/reflect2 v1.0.1 // indirect 65 | github.com/pmezard/go-difflib v1.0.0 // indirect 66 | github.com/prometheus/client_golang v1.11.1 // indirect 67 | github.com/prometheus/client_model v0.2.0 // indirect 68 | github.com/prometheus/common v0.26.0 // indirect 69 | github.com/prometheus/procfs v0.6.0 // indirect 70 | github.com/vmware/vsphere-automation-sdk-go/lib v0.7.0 // indirect 71 | go.uber.org/atomic v1.6.0 // indirect 72 | go.uber.org/multierr v1.5.0 // indirect 73 | go.uber.org/zap v1.14.1 // indirect 74 | golang.org/x/crypto v0.36.0 // indirect 75 | golang.org/x/net v0.38.0 // indirect 76 | golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect 77 | golang.org/x/sys v0.31.0 // indirect 78 | golang.org/x/term v0.30.0 // indirect 79 | golang.org/x/text v0.23.0 // indirect 80 | golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect 81 | gomodules.xyz/jsonpatch/v2 v2.0.1 // indirect 82 | google.golang.org/appengine v1.6.5 // indirect 83 | google.golang.org/protobuf v1.33.0 // indirect 84 | gopkg.in/fsnotify.v1 v1.4.7 // indirect 85 | gopkg.in/inf.v0 v0.9.1 // indirect 86 | gopkg.in/yaml.v2 v2.3.0 // indirect 87 | k8s.io/component-base v0.18.3 // indirect 88 | k8s.io/klog v1.0.0 // indirect 89 | k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 // indirect 90 | k8s.io/kube-proxy v0.18.3 // indirect 91 | 
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 // indirect 92 | sigs.k8s.io/yaml v1.2.0 // indirect 93 | ) 94 | 95 | replace ( 96 | github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Required by OLM 97 | github.com/Masterminds/goutils => github.com/Masterminds/goutils v1.1.1 98 | github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2 99 | github.com/openshift/api => github.com/openshift/api v0.0.0-20200413201024-c6e8c9b6eb9a // Required by network CRD API 100 | k8s.io/apimachinery => k8s.io/apimachinery v0.17.1 // Replaced by MCO/CRI-O 101 | k8s.io/client-go => k8s.io/client-go v0.17.4 // Required by prometheus-operator 102 | ) 103 | -------------------------------------------------------------------------------- /hack/certify-operator-ocp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | set -eo pipefail 3 | 4 | function cleanup { 5 | $CONTAINER_TOOL image rm -f quay.io/opdev/preflight:stable 6 | } 7 | 8 | trap cleanup EXIT 9 | 10 | CONTAINER_TOOL=${CONTAINER_TOOL:-docker} 11 | CONTAINER_REGISTRY=${CONTAINER_REGISTRY:-quay.io} 12 | 13 | $CONTAINER_TOOL run \ 14 | --rm \ 15 | --security-opt=label=disable \ 16 | --env PFLT_LOGLEVEL=trace \ 17 | --env PFLT_CERTIFICATION_PROJECT_ID=$PFLT_CERTIFICATION_PROJECT_ID \ 18 | --env PFLT_PYXIS_API_TOKEN=$PFLT_PYXIS_API_TOKEN \ 19 | quay.io/opdev/preflight:stable check container -s docker.io/vmware/nsx-container-plugin-operator:$VERSION 20 | 21 | exit 0 22 | -------------------------------------------------------------------------------- /hack/get-kustomize.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright © 2021 VMware, Inc. All Rights Reserved. 
4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 7 | KUSTOMIZE_VERSION="v3.9.1" 8 | _BINDIR=$THIS_DIR/.bin 9 | 10 | # Check if you have kustomize installed, or download it. 11 | # please note that different versions of kustomize can give different results 12 | check_or_install_kustomize() { 13 | # Check if there is already a kustomize binary in $_BINDIR and if yes, check 14 | # if the version matches the expected one. 15 | local kustomize="$(PATH=$_BINDIR command -v kustomize)" 16 | if [ -x "$kustomize" ]; then 17 | local kustomize_version="$($kustomize version --short)" 18 | # Should work with following styles: 19 | # - kustomize/v3.3.0 20 | # - {kustomize/v3.8.2 2020-08-29T17:44:01Z } 21 | kustomize_version="${kustomize_version##*/}" 22 | kustomize_version="${kustomize_version%% *}" 23 | if [ "${kustomize_version}" == "${KUSTOMIZE_VERSION}" ]; then 24 | # If version is exact match, stop here. 25 | >&2 echo "Found "$kustomize" version "$kustomize_version 26 | echo "$kustomize" 27 | return 0 28 | fi 29 | # If we are here kustomize version isn't the right one 30 | >&2 echo "Found "$kustomize" version "$kustomize_version", expecting "$KUSTOMIZE_VERSION". Installing desired version" 31 | fi 32 | local kustomize_url="https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize/${KUSTOMIZE_VERSION}/kustomize_${KUSTOMIZE_VERSION}_linux_amd64.tar.gz" 33 | curl -sLo kustomize.tar.gz "${kustomize_url}" || return 1 34 | mkdir -p $_BINDIR || return 1 35 | tar -xzf kustomize.tar.gz -C "$_BINDIR" || return 1 36 | kustomize=$_BINDIR/kustomize 37 | rm -f kustomize.tar.gz 38 | echo $kustomize 39 | return 0 40 | } 41 | -------------------------------------------------------------------------------- /hack/prepare-assets.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Copyright © 2021 VMware, Inc. All Rights Reserved. 
4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | # Usage: VERSION=v1.0.0 ./prepare-assets.sh 7 | 8 | set -eo pipefail 9 | 10 | function echoerr { 11 | >&2 echo "$@" 12 | exit 1 13 | } 14 | 15 | if [ -z "$VERSION" ]; then 16 | echoerr "Environment variable VERSION must be set" 17 | fi 18 | 19 | if [ -z "$1" ]; then 20 | echoerr "Argument required: output directory for assets" 21 | fi 22 | 23 | THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 24 | pushd $THIS_DIR/.. > /dev/null 25 | 26 | source ./hack/get-kustomize.sh 27 | kustomize=$(check_or_install_kustomize) 28 | 29 | mkdir -p "$1" 30 | OUTPUT_DIR=$(cd "$1" && pwd) 31 | 32 | OPERATOR_IMG_NAME="vmware/nsx-container-plugin-operator" 33 | OPERATOR_PLATFORMS=( 34 | "openshift4" 35 | "kubernetes" 36 | ) 37 | 38 | for platform in "${OPERATOR_PLATFORMS[@]}"; do 39 | mkdir -p ${OUTPUT_DIR}/${platform} 40 | cp deploy/${platform}/*.yaml ${OUTPUT_DIR}/${platform} 41 | pushd ${OUTPUT_DIR} > /dev/null 42 | pushd ${platform} > /dev/null 43 | # erase anything that might already be in the kustomization file 44 | echo "" > kustomization.yaml 45 | $kustomize edit add base operator.yaml 46 | $kustomize edit set image ${OPERATOR_IMG_NAME}:${VERSION} 47 | $kustomize build > operator_tmp.yaml 48 | mv operator_tmp.yaml operator.yaml 49 | rm kustomization.yaml 50 | popd > /dev/null 51 | tar czf ${platform}.tar.gz ${platform}/*.yaml 52 | rm -rf ${platform} 53 | popd > /dev/null 54 | done 55 | 56 | ls "$OUTPUT_DIR" | cat 57 | -------------------------------------------------------------------------------- /olm-catalog/0.1.0/operator.nsx.vmware.com_ncpinstalls_crd.yaml: -------------------------------------------------------------------------------- 1 | ../../deploy/openshift4/operator.nsx.vmware.com_ncpinstalls_crd.yaml -------------------------------------------------------------------------------- /olm-catalog/make_zip_bundle.py: 
-------------------------------------------------------------------------------- 1 | import os 2 | import os.path 3 | import sys 4 | 5 | try: 6 | import yaml 7 | except ImportError: 8 | print("Please install pyyaml") 9 | sys.exit(1) 10 | 11 | import zipfile 12 | 13 | 14 | def _read_yaml(manifest_path): 15 | with open(manifest_path) as f: 16 | try: 17 | return yaml.load(f) 18 | except Exception as e: 19 | print("Unable to parse yaml data in %s: %s" % (manifest_path, e)) 20 | sys.exit(1) 21 | 22 | 23 | def parse_package_manifest(manifest_path): 24 | try: 25 | data = _read_yaml(manifest_path) 26 | except Exception as e: 27 | print("Unable to read file %s: %s" % (manifest_path, e)) 28 | sys.exit(1) 29 | try: 30 | channels = data.get('channels', []) 31 | except AttributeError: 32 | print("Parsed YAML is not a dict: %s" % data) 33 | sys.exit(1) 34 | for channel in channels: 35 | if channel.get('name') == 'alpha': 36 | currentCSV = channel['currentCSV'] 37 | break 38 | try: 39 | # by convention the version starts with a 'v', we only want 40 | # the actual version number 41 | version = currentCSV.split('.', 1)[1][1:] 42 | except IndexError: 43 | print("Cannot find version in current CSV name: %s" % currentCSV) 44 | sys.exit(1) 45 | return version 46 | 47 | 48 | def make_zip_bundle(manifest_file, version, zip_file): 49 | bundle_files = [f for f in os.listdir(version) 50 | if os.path.isfile(os.path.join(version, f))] 51 | with zipfile.ZipFile(zip_file, 'w') as bundle: 52 | bundle.write(manifest_file) 53 | for bundle_file in bundle_files: 54 | bundle.write("%s/%s" % (version, bundle_file), 55 | arcname=bundle_file) 56 | print("Zip bundle %s ready" % zip_file) 57 | 58 | 59 | def main(): 60 | if len(sys.argv) < 2: 61 | print("Missing package manifest path") 62 | sys.exit(1) 63 | version = parse_package_manifest(sys.argv[1]) 64 | if len(sys.argv) > 2: 65 | zip_file = sys.argv[2] 66 | else: 67 | zip_file = 'nsx-ncp-operator-bundle.zip' 68 | make_zip_bundle(sys.argv[1], version, 
zip_file) 69 | 70 | if __name__ == '__main__': 71 | main() 72 | -------------------------------------------------------------------------------- /olm-catalog/nsx-container-plugin-operator.package.yaml: -------------------------------------------------------------------------------- 1 | channels: 2 | - currentCSV: nsx-container-plugin-operator.v0.2.0 3 | name: alpha 4 | defaultChannel: alpha 5 | packageName: nsx-container-plugin-operator 6 | -------------------------------------------------------------------------------- /pkg/apis/addtoscheme_operator_v1.go: -------------------------------------------------------------------------------- 1 | package apis 2 | 3 | import ( 4 | v1 "github.com/vmware/nsx-container-plugin-operator/pkg/apis/operator/v1" 5 | ) 6 | 7 | func init() { 8 | // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back 9 | AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/apis/apis.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package apis 5 | 6 | import ( 7 | "k8s.io/apimachinery/pkg/runtime" 8 | ) 9 | 10 | // AddToSchemes may be used to add all resources defined in the project to a Scheme 11 | var AddToSchemes runtime.SchemeBuilder 12 | 13 | // AddToScheme adds all Resources to the Scheme 14 | func AddToScheme(s *runtime.Scheme) error { 15 | return AddToSchemes.AddToScheme(s) 16 | } 17 | -------------------------------------------------------------------------------- /pkg/apis/operator/group.go: -------------------------------------------------------------------------------- 1 | // Package operator contains operator API versions. 2 | // 3 | // This file ensures Go source parsers acknowledge the operator package 4 | // and any child packages. 
It can be removed if any other Go source files are 5 | // added to this package. 6 | package operator 7 | -------------------------------------------------------------------------------- /pkg/apis/operator/v1/doc.go: -------------------------------------------------------------------------------- 1 | // Package v1 contains API Schema definitions for the operator v1 API group 2 | // +k8s:deepcopy-gen=package,register 3 | // +groupName=operator.nsx.vmware.com 4 | package v1 5 | -------------------------------------------------------------------------------- /pkg/apis/operator/v1/ncpinstall_types.go: -------------------------------------------------------------------------------- 1 | package v1 2 | 3 | import ( 4 | configv1 "github.com/openshift/api/config/v1" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 10 | 11 | // NcpInstallSpec defines the desired state of NcpInstall 12 | type NcpInstallSpec struct { 13 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 14 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 15 | // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html 16 | 17 | // Replicas number for nsx-ncp deployment 18 | // Operator will ignore the value if NCP HA is deactivated 19 | // +kubebuilder:validation:Minimum=1 20 | // +optional 21 | NcpReplicas int32 `json:"ncpReplicas,omitempty"` 22 | // For tagging node logical switch ports with node name and cluster 23 | // Note that if one node has multiple attached VirtualNetworkInterfaces, this function is not supported and should be set to false. 
24 | AddNodeTag bool `json:"addNodeTag,omitempty"` 25 | // For configuring nsx-ncp Deployment properties 26 | NsxNcpSpec NsxNcpDeploymentSpec `json:"nsx-ncp,omitempty"` 27 | // For configuring nsx-ncp-bootstrap and nsx-node-agent DaemonSet properties 28 | NsxNodeAgentDsSpec NsxNodeAgentDaemonSetSpec `json:"nsx-node-agent,omitempty"` 29 | } 30 | 31 | // NsxNcpDeploymentSpec define user configured properties for NCP Deployment 32 | type NsxNcpDeploymentSpec struct { 33 | NodeSelector map[string]string `json:"nodeSelector,omitempty"` 34 | Tolerations []corev1.Toleration `json:"tolerations,omitempty"` 35 | } 36 | 37 | // NsxNodeAgentDaemonSetSpec define user configured properties for nsx-ncp-bootstrap and nsx-node-agent DaemonSet 38 | type NsxNodeAgentDaemonSetSpec struct { 39 | Tolerations []corev1.Toleration `json:"tolerations,omitempty"` 40 | } 41 | 42 | // NcpInstallStatus defines the observed state of NcpInstall 43 | type NcpInstallStatus struct { 44 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 45 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 46 | // Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html 47 | 48 | // Conditions describes the state of NCP installation. 
49 | // +optional 50 | Conditions []InstallCondition `json:"conditions,omitempty"` 51 | } 52 | 53 | type InstallCondition = configv1.ClusterOperatorStatusCondition 54 | 55 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 56 | 57 | // NcpInstall is the Schema for the ncpinstalls API 58 | // +kubebuilder:subresource:status 59 | // +kubebuilder:resource:path=ncpinstalls,scope=Namespaced 60 | type NcpInstall struct { 61 | metav1.TypeMeta `json:",inline"` 62 | metav1.ObjectMeta `json:"metadata,omitempty"` 63 | 64 | Spec NcpInstallSpec `json:"spec,omitempty"` 65 | Status NcpInstallStatus `json:"status,omitempty"` 66 | } 67 | 68 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 69 | 70 | // NcpInstallList contains a list of NcpInstall 71 | type NcpInstallList struct { 72 | metav1.TypeMeta `json:",inline"` 73 | metav1.ListMeta `json:"metadata,omitempty"` 74 | Items []NcpInstall `json:"items"` 75 | } 76 | 77 | func init() { 78 | SchemeBuilder.Register(&NcpInstall{}, &NcpInstallList{}) 79 | } 80 | -------------------------------------------------------------------------------- /pkg/apis/operator/v1/register.go: -------------------------------------------------------------------------------- 1 | // NOTE: Boilerplate only. Ignore this file. 
2 | 3 | // Package v1 contains API Schema definitions for the operator v1 API group 4 | // +k8s:deepcopy-gen=package,register 5 | // +groupName=operator.nsx.vmware.com 6 | package v1 7 | 8 | import ( 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "sigs.k8s.io/controller-runtime/pkg/scheme" 11 | ) 12 | 13 | var ( 14 | // SchemeGroupVersion is group version used to register these objects 15 | SchemeGroupVersion = schema.GroupVersion{Group: "operator.nsx.vmware.com", Version: "v1"} 16 | 17 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 18 | SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} 19 | ) 20 | -------------------------------------------------------------------------------- /pkg/apis/operator/v1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | // +build !ignore_autogenerated 2 | 3 | // Code generated by operator-sdk. DO NOT EDIT. 4 | 5 | package v1 6 | 7 | import ( 8 | configv1 "github.com/openshift/api/config/v1" 9 | runtime "k8s.io/apimachinery/pkg/runtime" 10 | ) 11 | 12 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 13 | func (in *NcpInstall) DeepCopyInto(out *NcpInstall) { 14 | *out = *in 15 | out.TypeMeta = in.TypeMeta 16 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 17 | out.Spec = in.Spec 18 | in.Status.DeepCopyInto(&out.Status) 19 | return 20 | } 21 | 22 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NcpInstall. 23 | func (in *NcpInstall) DeepCopy() *NcpInstall { 24 | if in == nil { 25 | return nil 26 | } 27 | out := new(NcpInstall) 28 | in.DeepCopyInto(out) 29 | return out 30 | } 31 | 32 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
33 | func (in *NcpInstall) DeepCopyObject() runtime.Object { 34 | if c := in.DeepCopy(); c != nil { 35 | return c 36 | } 37 | return nil 38 | } 39 | 40 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 41 | func (in *NcpInstallList) DeepCopyInto(out *NcpInstallList) { 42 | *out = *in 43 | out.TypeMeta = in.TypeMeta 44 | in.ListMeta.DeepCopyInto(&out.ListMeta) 45 | if in.Items != nil { 46 | in, out := &in.Items, &out.Items 47 | *out = make([]NcpInstall, len(*in)) 48 | for i := range *in { 49 | (*in)[i].DeepCopyInto(&(*out)[i]) 50 | } 51 | } 52 | return 53 | } 54 | 55 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NcpInstallList. 56 | func (in *NcpInstallList) DeepCopy() *NcpInstallList { 57 | if in == nil { 58 | return nil 59 | } 60 | out := new(NcpInstallList) 61 | in.DeepCopyInto(out) 62 | return out 63 | } 64 | 65 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 66 | func (in *NcpInstallList) DeepCopyObject() runtime.Object { 67 | if c := in.DeepCopy(); c != nil { 68 | return c 69 | } 70 | return nil 71 | } 72 | 73 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 74 | func (in *NcpInstallSpec) DeepCopyInto(out *NcpInstallSpec) { 75 | *out = *in 76 | return 77 | } 78 | 79 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NcpInstallSpec. 80 | func (in *NcpInstallSpec) DeepCopy() *NcpInstallSpec { 81 | if in == nil { 82 | return nil 83 | } 84 | out := new(NcpInstallSpec) 85 | in.DeepCopyInto(out) 86 | return out 87 | } 88 | 89 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
90 | func (in *NcpInstallStatus) DeepCopyInto(out *NcpInstallStatus) { 91 | *out = *in 92 | if in.Conditions != nil { 93 | in, out := &in.Conditions, &out.Conditions 94 | *out = make([]configv1.ClusterOperatorStatusCondition, len(*in)) 95 | for i := range *in { 96 | (*in)[i].DeepCopyInto(&(*out)[i]) 97 | } 98 | } 99 | return 100 | } 101 | 102 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NcpInstallStatus. 103 | func (in *NcpInstallStatus) DeepCopy() *NcpInstallStatus { 104 | if in == nil { 105 | return nil 106 | } 107 | out := new(NcpInstallStatus) 108 | in.DeepCopyInto(out) 109 | return out 110 | } 111 | -------------------------------------------------------------------------------- /pkg/controller/add_configmap.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package controller 5 | 6 | import ( 7 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/configmap" 8 | ) 9 | 10 | func init() { 11 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 12 | AddToManagerFuncs = append(AddToManagerFuncs, configmap.Add) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/controller/add_node.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package controller 5 | 6 | import ( 7 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/node" 8 | ) 9 | 10 | func init() { 11 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
12 | AddToManagerFuncs = append(AddToManagerFuncs, node.Add) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/controller/add_pod.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package controller 5 | 6 | import ( 7 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/pod" 8 | ) 9 | 10 | func init() { 11 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 12 | AddToManagerFuncs = append(AddToManagerFuncs, pod.Add) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/controller/configmap/configmap_controller_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package configmap 5 | 6 | import ( 7 | "context" 8 | "os" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | appsv1 "k8s.io/api/apps/v1" 13 | corev1 "k8s.io/api/core/v1" 14 | "k8s.io/apimachinery/pkg/api/errors" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 17 | "k8s.io/apimachinery/pkg/types" 18 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 19 | ) 20 | 21 | func TestConfigMapController_deleteExistingPods(t *testing.T) { 22 | c := fake.NewFakeClient() 23 | // Create a pod without label 24 | ncpPod := &corev1.Pod{ 25 | ObjectMeta: metav1.ObjectMeta{ 26 | Name: "nsx-ncp", 27 | Namespace: "nsx-system", 28 | }, 29 | } 30 | c.Create(context.TODO(), ncpPod) 31 | deleteExistingPods(c, "nsx-system") 32 | obj := &corev1.Pod{} 33 | namespacedName := types.NamespacedName{ 34 | Name: "nsx-ncp", 35 | Namespace: "nsx-system", 36 | } 37 | err := c.Get(context.TODO(), namespacedName, obj) 38 | if err != nil { 39 
| t.Fatalf("failed to get ncp pod") 40 | } 41 | 42 | // Update pod with label 43 | ncpPod = &corev1.Pod{ 44 | ObjectMeta: metav1.ObjectMeta{ 45 | Name: "nsx-ncp", 46 | Namespace: "nsx-system", 47 | Labels: map[string]string{ 48 | "component": "nsx-ncp", 49 | }, 50 | }, 51 | } 52 | c.Update(context.TODO(), ncpPod) 53 | deleteExistingPods(c, "nsx-ncp") 54 | obj = &corev1.Pod{} 55 | err = c.Get(context.TODO(), namespacedName, obj) 56 | if !errors.IsNotFound(err) { 57 | t.Fatalf("failed to delete ncp pod") 58 | } 59 | } 60 | 61 | func TestConfigMapController_patchObjSpecAnnotations(t *testing.T) { 62 | nsxNameSpaceName := "nsx-system" 63 | testname := "test-name" 64 | var err error 65 | 66 | // Patch obj without template case 67 | obj := &unstructured.Unstructured{ 68 | Object: map[string]interface{}{ 69 | "apiVersion": "apps/v1", 70 | "kind": "Deployment", 71 | "metadata": map[string]interface{}{ 72 | "name": "test-name", 73 | "namespace": nsxNameSpaceName, 74 | }, 75 | "spec": map[string]interface{}{ 76 | "replicas": 2, 77 | "selector": map[string]interface{}{ 78 | "matchLabels": map[string]interface{}{ 79 | "app": "demo", 80 | }, 81 | }, 82 | }, 83 | }, 84 | } 85 | 86 | err = patchObjSpecAnnotations(obj, testname) 87 | assert.True(t, err != nil) 88 | 89 | // Patch obj with template emtpy case 90 | obj = &unstructured.Unstructured{ 91 | Object: map[string]interface{}{ 92 | "apiVersion": "apps/v1", 93 | "kind": "Deployment", 94 | "metadata": map[string]interface{}{ 95 | "name": "test-name", 96 | "namespace": nsxNameSpaceName, 97 | }, 98 | "spec": map[string]interface{}{ 99 | "replicas": 2, 100 | "selector": map[string]interface{}{ 101 | "matchLabels": map[string]interface{}{ 102 | "app": "demo", 103 | }, 104 | }, 105 | "template": map[string]interface{}{}, 106 | }, 107 | }, 108 | } 109 | 110 | err = patchObjSpecAnnotations(obj, testname) 111 | assert.True(t, err == nil) 112 | 113 | // Verify timestamp field was patched 114 | annotations, found, err := 
unstructured.NestedMap(obj.Object, "spec", "template", "metadata", "annotations") 115 | if err != nil || !found || annotations == nil { 116 | t.Fatalf("Get annotations failed") 117 | } 118 | timeStamp, timeStampFound := annotations["updateTimeStamp"].(string) 119 | assert.True(t, timeStampFound) 120 | assert.True(t, len(timeStamp) != 0) 121 | 122 | // Patch obj without metada case 123 | obj = &unstructured.Unstructured{ 124 | Object: map[string]interface{}{ 125 | "apiVersion": "apps/v1", 126 | "kind": "Deployment", 127 | "metadata": map[string]interface{}{ 128 | "name": "test-name", 129 | "namespace": nsxNameSpaceName, 130 | }, 131 | "spec": map[string]interface{}{ 132 | "replicas": 2, 133 | "selector": map[string]interface{}{ 134 | "matchLabels": map[string]interface{}{ 135 | "app": "demo", 136 | }, 137 | }, 138 | "template": map[string]interface{}{ 139 | "hostNetwork": "true", 140 | }, 141 | }, 142 | }, 143 | } 144 | 145 | err = patchObjSpecAnnotations(obj, testname) 146 | assert.True(t, err == nil) 147 | 148 | // Verify timestamp field was patched 149 | annotations, found, err = unstructured.NestedMap(obj.Object, "spec", "template", "metadata", "annotations") 150 | if err != nil || !found || annotations == nil { 151 | t.Fatalf("Get annotations failed") 152 | } 153 | timeStamp, timeStampFound = annotations["updateTimeStamp"].(string) 154 | assert.True(t, timeStampFound) 155 | assert.True(t, len(timeStamp) != 0) 156 | 157 | // Patch obj without annotations spec case 158 | testname = "nsx-ncp" 159 | obj = &unstructured.Unstructured{ 160 | Object: map[string]interface{}{ 161 | "apiVersion": "apps/v1", 162 | "kind": "Deployment", 163 | "metadata": map[string]interface{}{ 164 | "name": testname, 165 | "namespace": nsxNameSpaceName, 166 | }, 167 | "spec": map[string]interface{}{ 168 | "replicas": 2, 169 | "selector": map[string]interface{}{ 170 | "matchLabels": map[string]interface{}{ 171 | "app": "demo", 172 | }, 173 | }, 174 | "template": map[string]interface{}{ 175 
| "metadata": map[string]interface{}{ 176 | "creationTimestamp": "null", 177 | "labels": map[string]interface{}{ 178 | "component": "nsx-ncp", 179 | "tier": "nsx-networking", 180 | "version": "v1", 181 | }, 182 | }, 183 | "hostNetwork": "true", 184 | }, 185 | }, 186 | }, 187 | } 188 | 189 | err = patchObjSpecAnnotations(obj, testname) 190 | assert.True(t, err == nil) 191 | 192 | // Verify timestamp field was patched 193 | annotations, found, err = unstructured.NestedMap(obj.Object, "spec", "template", "metadata", "annotations") 194 | if err != nil || !found || annotations == nil { 195 | t.Fatalf("Get annotations failed") 196 | } 197 | timeStamp, timeStampFound = annotations["updateTimeStamp"].(string) 198 | assert.True(t, timeStampFound) 199 | assert.True(t, len(timeStamp) != 0) 200 | 201 | // Patch obj with annotations spec case 202 | testname = "nsx-node-agent" 203 | obj = &unstructured.Unstructured{ 204 | Object: map[string]interface{}{ 205 | "apiVersion": "apps/v1", 206 | "kind": "Deployment", 207 | "metadata": map[string]interface{}{ 208 | "name": testname, 209 | "namespace": nsxNameSpaceName, 210 | }, 211 | "spec": map[string]interface{}{ 212 | "replicas": 2, 213 | "selector": map[string]interface{}{ 214 | "matchLabels": map[string]interface{}{ 215 | "app": "demo", 216 | }, 217 | }, 218 | "template": map[string]interface{}{ 219 | "metadata": map[string]interface{}{ 220 | "creationTimestamp": "null", 221 | "labels": map[string]interface{}{ 222 | "component": "nsx-ncp", 223 | "tier": "nsx-networking", 224 | "version": "v1", 225 | }, 226 | "annotations": map[string]interface{}{ 227 | "test_annotation": "test_value", 228 | }, 229 | "hostNetwork": "true", 230 | }, 231 | }, 232 | }, 233 | }, 234 | } 235 | 236 | err = patchObjSpecAnnotations(obj, testname) 237 | assert.True(t, err == nil) 238 | 239 | // Verify timestamp field was patched 240 | annotations, found, err = unstructured.NestedMap(obj.Object, "spec", "template", "metadata", "annotations") 241 | if err != 
nil || !found || annotations == nil { 242 | t.Fatalf("Get annotations failed") 243 | } 244 | timeStamp, timeStampFound = annotations["updateTimeStamp"].(string) 245 | assert.True(t, timeStampFound) 246 | assert.True(t, len(timeStamp) != 0) 247 | } 248 | 249 | func NewFakeReconcileConfigMap() *ReconcileConfigMap { 250 | client := fake.NewFakeClient() 251 | return &ReconcileConfigMap{ 252 | client: client, 253 | } 254 | } 255 | 256 | func TestConfigMapController_isNcpDeploymentChanged(t *testing.T) { 257 | r := NewFakeReconcileConfigMap() 258 | // NCP deployment not found case 259 | ncpChanged, _ := r.isNcpDeploymentChanged(1, nil, nil) 260 | assert.Equal(t, true, ncpChanged) 261 | 262 | container := corev1.Container{Image: "fakeImage"} 263 | var replicas int32 = 1 264 | var ncpNodeSelector = map[string]string{"nodekey": "master"} 265 | var ncpTolerations = []corev1.Toleration{ 266 | { 267 | Key: "Key", 268 | Operator: "Equal", 269 | Value: "Value", 270 | Effect: "NoEffect", 271 | }, 272 | } 273 | 274 | ncpDeployment := &appsv1.Deployment{ 275 | ObjectMeta: metav1.ObjectMeta{ 276 | Name: "nsx-ncp", 277 | Namespace: "nsx-system", 278 | }, 279 | Spec: appsv1.DeploymentSpec{ 280 | Template: corev1.PodTemplateSpec{ 281 | Spec: corev1.PodSpec{ 282 | Containers: []corev1.Container{container}, 283 | NodeSelector: ncpNodeSelector, 284 | Tolerations: ncpTolerations, 285 | }, 286 | }, 287 | Replicas: &replicas, 288 | }, 289 | } 290 | r.client.Create(context.TODO(), ncpDeployment) 291 | 292 | // Image no change case 293 | os.Setenv("NCP_IMAGE", "fakeImage") 294 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &ncpNodeSelector, &ncpTolerations) 295 | assert.Equal(t, false, ncpChanged) 296 | 297 | // Replicas change case 298 | ncpChanged, _ = r.isNcpDeploymentChanged(3, &ncpNodeSelector, &ncpTolerations) 299 | assert.Equal(t, true, ncpChanged) 300 | 301 | //Image change case 302 | os.Setenv("NCP_IMAGE", "fakeNewImage") 303 | ncpChanged, _ = r.isNcpDeploymentChanged(1, 
&ncpNodeSelector, &ncpTolerations) 304 | assert.Equal(t, true, ncpChanged) 305 | 306 | // NodeSelector no change case 307 | os.Setenv("NCP_IMAGE", "fakeImage") 308 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &ncpNodeSelector, &ncpTolerations) 309 | assert.Equal(t, false, ncpChanged) 310 | 311 | // NodeSelector change case 312 | newNodeSelector := map[string]string{ 313 | "nodekey": "master", 314 | "nodekey1": "vm", 315 | } 316 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &newNodeSelector, &ncpTolerations) 317 | assert.Equal(t, true, ncpChanged) 318 | 319 | // NodeSelector change case when ncpNodeSelector passing empty [] 320 | emptyNodeSelector := map[string]string{} 321 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &emptyNodeSelector, &ncpTolerations) 322 | assert.Equal(t, true, ncpChanged) 323 | 324 | // Node Tolerations no change case 325 | os.Setenv("NCP_IMAGE", "fakeImage") 326 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &ncpNodeSelector, &ncpTolerations) 327 | assert.Equal(t, false, ncpChanged) 328 | 329 | // Node Tolerations change case 330 | var newTolerations = []corev1.Toleration{ 331 | { 332 | Key: "Key2", 333 | Operator: "Equal", 334 | Value: "Value2", 335 | Effect: "NoEffect", 336 | }, 337 | } 338 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &ncpNodeSelector, &newTolerations) 339 | assert.Equal(t, true, ncpChanged) 340 | 341 | // Node Tolerations change case when passing empty [] 342 | var emptyTolerations []corev1.Toleration 343 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &ncpNodeSelector, &emptyTolerations) 344 | assert.Equal(t, true, ncpChanged) 345 | 346 | // Node Tolerations no change case when tolerations passed by empty [] 347 | ncpDeployment = &appsv1.Deployment{ 348 | ObjectMeta: metav1.ObjectMeta{ 349 | Name: "nsx-ncp", 350 | Namespace: "nsx-system", 351 | }, 352 | Spec: appsv1.DeploymentSpec{ 353 | Template: corev1.PodTemplateSpec{ 354 | Spec: corev1.PodSpec{ 355 | Containers: []corev1.Container{container}, 356 | NodeSelector: 
ncpNodeSelector, 357 | }, 358 | }, 359 | Replicas: &replicas, 360 | }, 361 | } 362 | 363 | r.client.Update(context.TODO(), ncpDeployment) 364 | ncpChanged, _ = r.isNcpDeploymentChanged(1, &ncpNodeSelector, &emptyTolerations) 365 | assert.Equal(t, false, ncpChanged) 366 | 367 | } 368 | 369 | func TestConfigMapController_isNsxNodeAgentDsChanged(t *testing.T) { 370 | r := NewFakeReconcileConfigMap() 371 | // Nsx Node Agent DaemonSet not found case 372 | nsxNodeAgentChanged, _ := r.isNsxNodeAgentDsChanged(nil) 373 | assert.Equal(t, true, nsxNodeAgentChanged) 374 | 375 | container := corev1.Container{Image: "fakeImage"} 376 | var nsxNodeAgentTolerations = []corev1.Toleration{ 377 | { 378 | Key: "Key", 379 | Operator: "Equal", 380 | Value: "Value", 381 | Effect: "NoEffect", 382 | }, 383 | } 384 | 385 | nsxNodeAgentDs := &appsv1.DaemonSet{ 386 | ObjectMeta: metav1.ObjectMeta{ 387 | Name: "nsx-node-agent", 388 | Namespace: "nsx-system", 389 | }, 390 | Spec: appsv1.DaemonSetSpec{ 391 | Template: corev1.PodTemplateSpec{ 392 | Spec: corev1.PodSpec{ 393 | Containers: []corev1.Container{container}, 394 | Tolerations: nsxNodeAgentTolerations, 395 | }, 396 | }, 397 | }, 398 | } 399 | r.client.Create(context.TODO(), nsxNodeAgentDs) 400 | 401 | // Nsx Node Agent Tolerations no change case 402 | nsxNodeAgentChanged, _ = r.isNsxNodeAgentDsChanged(&nsxNodeAgentTolerations) 403 | assert.Equal(t, false, nsxNodeAgentChanged) 404 | 405 | // Nsx Node Agent Tolerations change case 406 | var newTolerations = []corev1.Toleration{ 407 | { 408 | Key: "Key2", 409 | Operator: "Equal", 410 | Value: "Value2", 411 | Effect: "NoEffect", 412 | }, 413 | } 414 | nsxNodeAgentChanged, _ = r.isNsxNodeAgentDsChanged(&newTolerations) 415 | assert.Equal(t, true, nsxNodeAgentChanged) 416 | 417 | // Nsx Node Agent Tolerations change case when passing empty [] 418 | var emptyTolerations []corev1.Toleration 419 | nsxNodeAgentChanged, _ = r.isNsxNodeAgentDsChanged(&emptyTolerations) 420 | assert.Equal(t, true, 
nsxNodeAgentChanged) 421 | 422 | // Nsx Node Agent Tolerations no change case when tolerations passed by empty [] 423 | nsxNodeAgentDs = &appsv1.DaemonSet{ 424 | ObjectMeta: metav1.ObjectMeta{ 425 | Name: "nsx-node-agent", 426 | Namespace: "nsx-system", 427 | }, 428 | Spec: appsv1.DaemonSetSpec{ 429 | Template: corev1.PodTemplateSpec{ 430 | Spec: corev1.PodSpec{ 431 | Containers: []corev1.Container{container}, 432 | }, 433 | }, 434 | }, 435 | } 436 | r.client.Update(context.TODO(), nsxNodeAgentDs) 437 | nsxNodeAgentChanged, _ = r.isNsxNodeAgentDsChanged(&emptyTolerations) 438 | assert.Equal(t, false, nsxNodeAgentChanged) 439 | } 440 | 441 | func TestConfigMapController_isSecretChanged(t *testing.T) { 442 | r := NewFakeReconcileConfigMap() 443 | mockValue := []byte("mockCrt") 444 | nsxSecret := &corev1.Secret{ 445 | ObjectMeta: metav1.ObjectMeta{ 446 | Name: "nsx-secret", 447 | Namespace: "nsx-system", 448 | }, 449 | Data: map[string][]byte{ 450 | "tls.crt": mockValue, "tls.key": mockValue, "tls.ca": mockValue, 451 | }, 452 | } 453 | lbSecret := &corev1.Secret{ 454 | ObjectMeta: metav1.ObjectMeta{ 455 | Name: "lb-secret", 456 | Namespace: "nsx-system", 457 | }, 458 | Data: map[string][]byte{ 459 | "tls.crt": mockValue, "tls.key": mockValue, 460 | }, 461 | } 462 | r.client.Create(context.TODO(), nsxSecret) 463 | r.client.Create(context.TODO(), lbSecret) 464 | 465 | // Secret nil case 466 | secretChanged, _ := r.isSecretChanged(nil, nil) 467 | assert.True(t, secretChanged) 468 | secretChanged, _ = r.isSecretChanged(nil, lbSecret) 469 | assert.True(t, secretChanged) 470 | secretChanged, _ = r.isSecretChanged(nsxSecret, nil) 471 | assert.True(t, secretChanged) 472 | 473 | // Secret equal case, with missing key 474 | secretChanged, _ = r.isSecretChanged(nsxSecret, lbSecret) 475 | assert.False(t, secretChanged) 476 | // Secret equal, with empty key 477 | mockSecret := &corev1.Secret{ 478 | Data: map[string][]byte{"tls.crt": mockValue, "tls.key": mockValue, "tls.ca": 
[]byte{}}, 479 | } 480 | secretChanged, _ = r.isSecretChanged(nsxSecret, mockSecret) 481 | assert.False(t, secretChanged) 482 | // Secret not equal case, with missing key 483 | mockSecret = &corev1.Secret{ 484 | Data: map[string][]byte{"tls.crt": mockValue, "tls.key": []byte("key")}, 485 | } 486 | secretChanged, _ = r.isSecretChanged(nsxSecret, mockSecret) 487 | assert.True(t, secretChanged) 488 | // Secret not equal, with all keys 489 | mockSecret = &corev1.Secret{ 490 | Data: map[string][]byte{"tls.crt": mockValue, "tls.key": []byte("key"), "tls.ca": mockValue}, 491 | } 492 | secretChanged, _ = r.isSecretChanged(nsxSecret, mockSecret) 493 | assert.True(t, secretChanged) 494 | secretChanged, _ = r.isSecretChanged(mockSecret, lbSecret) 495 | assert.True(t, secretChanged) 496 | } 497 | -------------------------------------------------------------------------------- /pkg/controller/controller.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 
2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package controller 5 | 6 | import ( 7 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/sharedinfo" 8 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/statusmanager" 9 | operatorversion "github.com/vmware/nsx-container-plugin-operator/version" 10 | "sigs.k8s.io/controller-runtime/pkg/manager" 11 | ) 12 | 13 | // AddToManagerFuncs is a list of functions to add all Controllers to the Manager 14 | var AddToManagerFuncs []func(manager.Manager, *statusmanager.StatusManager, *sharedinfo.SharedInfo) error 15 | 16 | // AddToManager adds all Controllers to the Manager 17 | func AddToManager(m manager.Manager, operatorNamespace string) error { 18 | sharedInfo, err := sharedinfo.New(m, operatorNamespace) 19 | if err != nil { 20 | return err 21 | } 22 | s := statusmanager.New(m.GetClient(), m.GetRESTMapper(), "nsx-ncp", operatorversion.Version, operatorNamespace, sharedInfo) 23 | for _, f := range AddToManagerFuncs { 24 | if err := f(m, s, sharedInfo); err != nil { 25 | return err 26 | } 27 | } 28 | return nil 29 | } 30 | -------------------------------------------------------------------------------- /pkg/controller/pod/pod_controller.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 
2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package pod 5 | 6 | import ( 7 | "bytes" 8 | "context" 9 | "fmt" 10 | "io" 11 | "strings" 12 | 13 | configv1 "github.com/openshift/api/config/v1" 14 | "github.com/openshift/cluster-network-operator/pkg/apply" 15 | "github.com/pkg/errors" 16 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/sharedinfo" 17 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/statusmanager" 18 | operatortypes "github.com/vmware/nsx-container-plugin-operator/pkg/types" 19 | appsv1 "k8s.io/api/apps/v1" 20 | corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 23 | "k8s.io/apimachinery/pkg/labels" 24 | "k8s.io/apimachinery/pkg/runtime" 25 | "k8s.io/apimachinery/pkg/types" 26 | "k8s.io/client-go/kubernetes" 27 | "k8s.io/client-go/rest" 28 | "sigs.k8s.io/controller-runtime/pkg/client" 29 | "sigs.k8s.io/controller-runtime/pkg/controller" 30 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 31 | "sigs.k8s.io/controller-runtime/pkg/handler" 32 | logf "sigs.k8s.io/controller-runtime/pkg/log" 33 | "sigs.k8s.io/controller-runtime/pkg/manager" 34 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 35 | "sigs.k8s.io/controller-runtime/pkg/source" 36 | ) 37 | 38 | var log = logf.Log.WithName("controller_pod") 39 | 40 | var SetControllerReference = controllerutil.SetControllerReference 41 | 42 | var ApplyObject = apply.ApplyObject 43 | 44 | var firstBoot = true 45 | 46 | // Add creates a new Pod Controller and adds it to the Manager. The Manager will set fields on the Controller 47 | // and Start it when the Manager is Started. 
48 | func Add(mgr manager.Manager, status *statusmanager.StatusManager, sharedInfo *sharedinfo.SharedInfo) error { 49 | return add(mgr, newReconciler(mgr, status, sharedInfo)) 50 | } 51 | 52 | func getNsxSystemNsName() string { 53 | return operatortypes.NsxNamespace 54 | } 55 | 56 | func getNsxNcpDeployments(nsxSystemNs string) []types.NamespacedName { 57 | return []types.NamespacedName{ 58 | // We reconcile only these K8s resources 59 | {Namespace: nsxSystemNs, Name: operatortypes.NsxNcpDeploymentName}, 60 | } 61 | } 62 | 63 | func getNsxNcpDs(nsxSystemNs string) []types.NamespacedName { 64 | return []types.NamespacedName{ 65 | // We reconcile only these K8s resources 66 | {Namespace: nsxSystemNs, Name: operatortypes.NsxNodeAgentDsName}, 67 | {Namespace: nsxSystemNs, Name: operatortypes.NsxNcpBootstrapDsName}, 68 | } 69 | } 70 | 71 | func mergeAndGetNsxNcpResources(resources ...[]types.NamespacedName) []types.NamespacedName { 72 | result := []types.NamespacedName{} 73 | for _, resource := range resources { 74 | result = append(result, resource...) 
75 | } 76 | return result 77 | } 78 | 79 | // newReconciler returns a new reconcile.Reconciler 80 | func newReconciler(mgr manager.Manager, status *statusmanager.StatusManager, sharedInfo *sharedinfo.SharedInfo) reconcile.Reconciler { 81 | // Install the operator config from OC API 82 | configv1.Install(mgr.GetScheme()) 83 | 84 | nsxSystemNs := getNsxSystemNsName() 85 | 86 | nsxNcpDs := getNsxNcpDs(nsxSystemNs) 87 | status.SetDaemonSets(nsxNcpDs) 88 | 89 | nsxNcpDeployments := getNsxNcpDeployments(nsxSystemNs) 90 | status.SetDeployments(nsxNcpDeployments) 91 | 92 | nsxNcpResources := mergeAndGetNsxNcpResources( 93 | nsxNcpDs, nsxNcpDeployments) 94 | 95 | reconcilePod := ReconcilePod{ 96 | client: mgr.GetClient(), 97 | scheme: mgr.GetScheme(), 98 | status: status, 99 | nsxNcpResources: nsxNcpResources, 100 | sharedInfo: sharedInfo, 101 | } 102 | 103 | if sharedInfo.AdaptorName == "openshift4" { 104 | reconcilePod.Adaptor = &PodOc{} 105 | } else { 106 | reconcilePod.Adaptor = &PodK8s{} 107 | } 108 | return &reconcilePod 109 | } 110 | 111 | // add adds a new Controller to mgr with r as the reconcile.Reconciler 112 | func add(mgr manager.Manager, r reconcile.Reconciler) error { 113 | // Create a new controller 114 | c, err := controller.New("pod-controller", mgr, controller.Options{Reconciler: r}) 115 | if err != nil { 116 | return err 117 | } 118 | err = c.Watch(&source.Kind{Type: &appsv1.DaemonSet{}}, &handler.EnqueueRequestForObject{}) 119 | if err != nil { 120 | return err 121 | } 122 | err = c.Watch(&source.Kind{Type: &appsv1.Deployment{}}, &handler.EnqueueRequestForObject{}) 123 | if err != nil { 124 | return err 125 | } 126 | // sometimes watching DaemonSet/Deployment cannot catch the pod restarting 127 | err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForObject{}) 128 | if err != nil { 129 | return err 130 | } 131 | 132 | return nil 133 | } 134 | 135 | // blank assignment to verify that ReconcilePod implements reconcile.Reconciler 136 | 
var _ reconcile.Reconciler = &ReconcilePod{} 137 | 138 | // ReconcilePods watches for updates to specified resources and then updates its StatusManager 139 | type ReconcilePod struct { 140 | client client.Client 141 | scheme *runtime.Scheme 142 | status *statusmanager.StatusManager 143 | sharedInfo *sharedinfo.SharedInfo 144 | 145 | nsxNcpResources []types.NamespacedName 146 | Adaptor 147 | } 148 | 149 | type Adaptor interface { 150 | setControllerReference(r *ReconcilePod, obj *unstructured.Unstructured) error 151 | } 152 | 153 | type Pod struct{} 154 | 155 | type PodK8s struct { 156 | Pod 157 | } 158 | 159 | type PodOc struct { 160 | Pod 161 | } 162 | 163 | func (r *ReconcilePod) isForNcpDeployOrNodeAgentDS(request reconcile.Request) bool { 164 | for _, nsxNcpResource := range r.nsxNcpResources { 165 | if nsxNcpResource.Namespace == request.Namespace && nsxNcpResource.Name == request.Name { 166 | return true 167 | } 168 | } 169 | return false 170 | } 171 | 172 | func (r *ReconcilePod) isForNsxNodeAgentPod(request reconcile.Request) bool { 173 | if request.Namespace == operatortypes.NsxNamespace && strings.Contains( 174 | request.Name, operatortypes.NsxNodeAgentDsName) && 175 | request.Name != operatortypes.NsxNodeAgentDsName { 176 | return true 177 | } 178 | return false 179 | } 180 | 181 | // Reconcile updates the ClusterOperator.Status to match the current state of the 182 | // watched Deployments/DaemonSets 183 | func (r *ReconcilePod) Reconcile(request reconcile.Request) (reconcile.Result, error) { 184 | reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) 185 | 186 | result, err := r.status.CheckExistingAgentPods(&firstBoot, r.sharedInfo) 187 | emptyResult := reconcile.Result{} 188 | if result != emptyResult || err != nil { 189 | return result, err 190 | } 191 | 192 | if !r.isForNcpDeployOrNodeAgentDS(request) { 193 | // the request is not for ncp deployement or nsx-node-agent ds, but for nsx-node-agent pod 194 | 
if r.isForNsxNodeAgentPod(request) {
			reqLogger.Info("Reconciling pod update for network status")
			return r.status.SetNodeConditionFromPod(request.NamespacedName, r.sharedInfo, nil)
		}
		return reconcile.Result{}, nil
	}

	reqLogger.Info("Reconciling pod update")
	r.status.SetFromPodsForOverall()

	// Re-apply the watched resource from the cached spec if it was deleted.
	if err := r.recreateNsxNcpResourceIfDeleted(request.Name); err != nil {
		return reconcile.Result{Requeue: true}, err
	}

	if request.Name == operatortypes.NsxNodeAgentDsName {
		if err := r.recreateNodeAgentPodsIfInvalidResolvConf(
			request.Name); err != nil {
			return reconcile.Result{Requeue: true}, err
		}
	}

	return reconcile.Result{RequeueAfter: operatortypes.DefaultResyncPeriod}, nil
}

// recreateNsxNcpResourceIfDeleted re-applies the NCP K8s resource named
// resName (deployment or daemonset) from the spec cached in sharedInfo when
// the live object no longer exists. Returns nil when the resource still
// exists or when its spec has not been cached yet.
func (r *ReconcilePod) recreateNsxNcpResourceIfDeleted(resName string) error {
	doesResExist, err := operatortypes.CheckIfNCPK8sResourceExists(
		r.client, resName)
	if err != nil {
		log.Error(err, fmt.Sprintf(
			"An error occurred while retrieving K8s resource - '%s'", resName))
		return err
	}
	if doesResExist {
		log.V(1).Info(fmt.Sprintf(
			"K8s resource - '%s' already exists", resName))
		return nil
	}

	log.Info(fmt.Sprintf("K8s resource - '%s' does not exist. It will be recreated", resName))

	k8sObj := r.identifyAndGetK8SObjToCreate(resName)
	if k8sObj == nil {
		// BUG FIX: previously this only logged and fell through, so the nil
		// object reached setControllerReference/createK8sObject and caused a
		// nil dereference. Bail out and retry once the spec is populated.
		log.Info(fmt.Sprintf("%s spec not set. Waiting for config_map controller to set it", resName))
		return nil
	}
	if err = r.setControllerReference(r, k8sObj); err != nil {
		log.Error(err, fmt.Sprintf(
			"Failed to set controller reference for K8s resource: %s", resName))
		return err
	}
	if err = r.createK8sObject(k8sObj); err != nil {
		log.Info(fmt.Sprintf(
			"Failed to recreate K8s resource: %s", resName))
		return err
	}
	log.Info(fmt.Sprintf("Recreated K8s resource: %s", resName))

	return nil
}

// identifyAndGetK8SObjToCreate returns a deep copy of the spec cached in
// sharedInfo for resName; the copy is nil while the config_map controller has
// not set the corresponding spec yet (DeepCopy on a nil unstructured is nil).
func (r *ReconcilePod) identifyAndGetK8SObjToCreate(resName string) *unstructured.Unstructured {
	if resName == operatortypes.NsxNcpBootstrapDsName {
		return r.sharedInfo.NsxNcpBootstrapDsSpec.DeepCopy()
	} else if resName == operatortypes.NsxNodeAgentDsName {
		return r.sharedInfo.NsxNodeAgentDsSpec.DeepCopy()
	} else {
		return r.sharedInfo.NsxNcpDeploymentSpec.DeepCopy()
	}
}

// Plain Kubernetes: no owner reference is set on applied objects.
func (adaptor *PodK8s) setControllerReference(r *ReconcilePod, obj *unstructured.Unstructured) error {
	return nil
}

// OpenShift: applied objects are owned by the cluster Network config.
func (adaptor *PodOc) setControllerReference(r *ReconcilePod, obj *unstructured.Unstructured) error {
	if r.sharedInfo.NetworkConfig == nil {
		return errors.New("NetworkConfig empty.
Waiting for config_map controller to set it")
	}
	err := SetControllerReference(r.sharedInfo.NetworkConfig, obj, r.scheme)
	if err != nil {
		err = errors.Wrapf(
			err, "could not set reference for (%s) %s/%s",
			obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
		r.status.SetDegraded(statusmanager.OperatorConfig, "ApplyObjectsError",
			fmt.Sprintf("Failed to apply objects: %v", err))
		return err
	}
	return nil
}

// createK8sObject applies obj to the cluster, setting the operator Degraded
// condition and returning the error on failure.
func (r *ReconcilePod) createK8sObject(obj *unstructured.Unstructured) error {
	if err := ApplyObject(context.TODO(), r.client, obj); err != nil {
		log.Error(
			err, fmt.Sprintf("could not apply (%s) %s/%s",
				obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName()))
		r.status.SetDegraded(
			statusmanager.OperatorConfig, "ApplyOperatorConfig",
			fmt.Sprintf("Failed to apply operator configuration: %v", err))
		return err
	}
	return nil
}

// recreateNodeAgentPodsIfInvalidResolvConf restarts any nsx-node-agent pods
// that are crash-looping because of an invalid resolv.conf.
func (r *ReconcilePod) recreateNodeAgentPodsIfInvalidResolvConf(
	resName string,
) error {
	podsInCLB, err := identifyPodsInCLBDueToInvalidResolvConf(r.client)
	if err != nil {
		log.Error(err, "Could not identify if any pod is in CLB because "+
			"of invalid resolv.conf")
		return err
	}
	if len(podsInCLB) > 0 && !deletePods(podsInCLB, r.client) {
		// FIX: error message typo "occured" -> "occurred".
		err := errors.New("Error occurred while trying to restart pods in " +
			"CLB because of invalid resolv.conf")
		log.Error(err, "")
		return err
	}
	return nil
}

// identifyPodsInCLBDueToInvalidResolvConf returns the nsx-node-agent pods
// that are in CrashLoopBackOff and whose previous container logs show a DNS
// resolution failure, i.e. pods that need a restart to pick up a valid
// resolv.conf.
func identifyPodsInCLBDueToInvalidResolvConf(c client.Client) (
	[]corev1.Pod, error,
) {
	var podsInCLB []corev1.Pod
	podList := &corev1.PodList{}
	nodeAgentLabelSelector := labels.SelectorFromSet(
		map[string]string{"component": operatortypes.NsxNodeAgentDsName})
	err := c.List(context.TODO(), podList, &client.ListOptions{
		LabelSelector: nodeAgentLabelSelector,
	})
	if err != nil {
		// FIX: log message typo "post list" -> "pod list".
		log.Error(err, "Error while getting the pod list for node-agent")
		return nil, err
	}
	for _, pod := range podList.Items {
		if isNodeAgentContainerInCLB(&pod) {
			nodeAgentLogs, err := getContainerLogsInPod(
				&pod, operatortypes.NsxNodeAgentContainerName)
			if err != nil {
				// FIX: log message typo "occured" -> "occurred".
				log.Error(err, "Error occurred while getting container logs")
				return nil, err
			}
			if strings.Contains(
				nodeAgentLogs, "Failed to establish a new connection: "+
					"[Errno -2] Name or service not known") {
				log.Info(fmt.Sprintf(
					"Pod %v in node %v is in CLB because of invalid resolv.conf. "+
						"It shall be restarted", pod.Name, pod.Spec.NodeName))
				podsInCLB = append(podsInCLB, pod)
			}
		}
	}
	return podsInCLB, nil
}

// isNodeAgentContainerInCLB reports whether the nsx-node-agent container of
// pod is currently waiting in CrashLoopBackOff.
func isNodeAgentContainerInCLB(pod *corev1.Pod) bool {
	for _, containerStatus := range pod.Status.ContainerStatuses {
		if containerStatus.Name == operatortypes.NsxNodeAgentContainerName {
			if containerStatus.State.Waiting != nil &&
				containerStatus.State.Waiting.Reason == "CrashLoopBackOff" {
				return true
			}
		}
	}
	return false
}

// getContainerLogsInPod fetches the last 50 lines of the previous run of the
// nsx-node-agent container in pod.
// NOTE(review): the containerName parameter is currently unused - the
// PodLogOptions hard-code NsxNodeAgentContainerName; both happen to match for
// the only caller visible here. Presumably declared as a var so it can be
// replaced in tests - confirm before changing.
var getContainerLogsInPod = func(pod *corev1.Pod, containerName string) (
	string, error,
) {
	config, err := rest.InClusterConfig()
	if err != nil {
		log.Error(err, "Failed to invoke rest.InClusterConfig")
		return "", err
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Error(err, "Failed to invoke kubernetes.NewForConfig")
		return "", err
	}
	logLinesRetrieved := int64(50)
	podLogOptions := &corev1.PodLogOptions{
		Container: operatortypes.NsxNodeAgentContainerName,
		Previous:  true,
		TailLines: &logLinesRetrieved,
	}
	podLogs, err := clientSet.CoreV1().Pods(pod.Namespace).GetLogs(
		pod.Name,
podLogOptions).Stream()
	if err != nil {
		log.Error(err, "Failed to invoke GetLogs")
		return "", err
	}
	defer podLogs.Close()
	// Drain the log stream into memory and hand it back as a string.
	buf := new(bytes.Buffer)
	if _, err = io.Copy(buf, podLogs); err != nil {
		log.Error(err, "Failed to copy podLogs")
		return "", err
	}
	return buf.String(), nil
}

// deletePods deletes the given pods with a 60-second grace period and
// foreground propagation. It returns false when at least one deletion failed;
// those pods are retried on a later reconcile.
func deletePods(pods []corev1.Pod, c client.Client) bool {
	propagation := metav1.DeletePropagationForeground
	deletedAll := true
	for _, pod := range pods {
		err := c.Delete(
			context.TODO(), &pod, client.GracePeriodSeconds(60),
			client.PropagationPolicy(propagation))
		if err != nil {
			log.Error(err, fmt.Sprintf("Unable to delete pod %v. Its deletion will be retried later", pod.Name))
			deletedAll = false
		}
	}
	return deletedAll
}
-------------------------------------------------------------------------------- /pkg/controller/pod/pod_controller_test.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved.
2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package pod 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | "reflect" 10 | "testing" 11 | 12 | configv1 "github.com/openshift/api/config/v1" 13 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/sharedinfo" 14 | "github.com/vmware/nsx-container-plugin-operator/pkg/controller/statusmanager" 15 | operatortypes "github.com/vmware/nsx-container-plugin-operator/pkg/types" 16 | appsv1 "k8s.io/api/apps/v1" 17 | corev1 "k8s.io/api/core/v1" 18 | "k8s.io/apimachinery/pkg/api/errors" 19 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 20 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 21 | "k8s.io/apimachinery/pkg/runtime" 22 | "k8s.io/apimachinery/pkg/types" 23 | "k8s.io/client-go/kubernetes/scheme" 24 | k8sclient "sigs.k8s.io/controller-runtime/pkg/client" 25 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 26 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 27 | ) 28 | 29 | func init() { 30 | configv1.AddToScheme(scheme.Scheme) 31 | appsv1.AddToScheme(scheme.Scheme) 32 | } 33 | 34 | func TestPodController_getNsxSystemNsName(t *testing.T) { 35 | res := getNsxSystemNsName() 36 | if res != "nsx-system" { 37 | t.Fatalf("pod controller not watching correct ns: nsx-system") 38 | } 39 | } 40 | 41 | func TestPodController_getNsxNcpDeployments(t *testing.T) { 42 | nsxNs := getNsxSystemNsName() 43 | nsxNcpDeployments := getNsxNcpDeployments(nsxNs) 44 | expectedNsxNcpDeployments := []types.NamespacedName{ 45 | {Namespace: nsxNs, Name: "nsx-ncp"}, 46 | } 47 | if !reflect.DeepEqual(expectedNsxNcpDeployments, nsxNcpDeployments) { 48 | t.Fatalf("pod controller must watch the deployments: %v", expectedNsxNcpDeployments) 49 | } 50 | } 51 | 52 | func TestPodController_getNsxNcpDs(t *testing.T) { 53 | nsxNs := getNsxSystemNsName() 54 | nsxNcpDs := getNsxNcpDs(nsxNs) 55 | expectedNsxNcpDs := []types.NamespacedName{ 56 | {Namespace: nsxNs, Name: "nsx-node-agent"}, 57 | {Namespace: nsxNs, Name: "nsx-ncp-bootstrap"}, 58 | } 59 
| if !reflect.DeepEqual(expectedNsxNcpDs, nsxNcpDs) { 60 | t.Fatalf("pod controller must watch the deployments: %v", expectedNsxNcpDs) 61 | } 62 | } 63 | 64 | func TestPodController_mergeAndGetNsxNcpResources(t *testing.T) { 65 | nsxNs := getNsxSystemNsName() 66 | nsxNcpDs := getNsxNcpDs(nsxNs) 67 | nsxNcpDeployments := getNsxNcpDeployments(nsxNs) 68 | expectedResult := []types.NamespacedName{ 69 | {Namespace: nsxNs, Name: "nsx-node-agent"}, 70 | {Namespace: nsxNs, Name: "nsx-ncp-bootstrap"}, 71 | {Namespace: nsxNs, Name: "nsx-ncp"}, 72 | } 73 | result := mergeAndGetNsxNcpResources(nsxNcpDs, nsxNcpDeployments) 74 | if !reflect.DeepEqual(expectedResult, result) { 75 | t.Fatalf("pod controller must watch the K8s Resources: %v", expectedResult) 76 | } 77 | } 78 | 79 | func getTestReconcilePod(t string) *ReconcilePod { 80 | client := fake.NewFakeClient() 81 | mapper := &statusmanager.FakeRESTMapper{} 82 | sharedInfo := &sharedinfo.SharedInfo{ 83 | AdaptorName: t, 84 | } 85 | status := statusmanager.New( 86 | client, mapper, "testing", "1.2.3", "operator-namespace", sharedInfo) 87 | sharedInfo.NetworkConfig = &configv1.Network{} 88 | 89 | nsxNcpResources := mergeAndGetNsxNcpResources( 90 | getNsxNcpDs(getNsxSystemNsName()), 91 | getNsxNcpDeployments(getNsxSystemNsName())) 92 | // Create a ReconcilePod object with the scheme and fake client. 
93 | reconcilePod := ReconcilePod{ 94 | client: client, 95 | status: status, 96 | nsxNcpResources: nsxNcpResources, 97 | sharedInfo: sharedInfo, 98 | } 99 | if t == "openshift4" { 100 | reconcilePod.Adaptor = &PodOc{} 101 | } else { 102 | reconcilePod.Adaptor = &PodK8s{} 103 | } 104 | return &reconcilePod 105 | } 106 | 107 | func (r *ReconcilePod) testRequestContainsNsxNcpResource(t *testing.T) { 108 | req := reconcile.Request{ 109 | NamespacedName: types.NamespacedName{ 110 | Name: "nsx-ncp", 111 | Namespace: "nsx-system", 112 | }, 113 | } 114 | 115 | if !r.isForNcpDeployOrNodeAgentDS(req) { 116 | t.Fatalf("pod controller must honor the request for NSX NCP Resource") 117 | } 118 | } 119 | 120 | func (r *ReconcilePod) testRequestNotContainsNsxNcpResource(t *testing.T) { 121 | req := reconcile.Request{ 122 | NamespacedName: types.NamespacedName{ 123 | Name: "dummy", 124 | Namespace: "dummy", 125 | }, 126 | } 127 | 128 | if r.isForNcpDeployOrNodeAgentDS(req) { 129 | t.Fatalf("pod controller must ignore the request for non NSX NCP Resource") 130 | } 131 | } 132 | 133 | func TestPodController_isForNcpDeployOrNodeAgentDS(t *testing.T) { 134 | r := getTestReconcilePod("openshift4") 135 | r.testRequestContainsNsxNcpResource(t) 136 | r.testRequestNotContainsNsxNcpResource(t) 137 | } 138 | 139 | func (r *ReconcilePod) testReconcileOnNotWatchedResource(t *testing.T) { 140 | // Mock request to simulate Reconcile() being called on an event for a 141 | // non-watched resource. 
142 | req := reconcile.Request{ 143 | NamespacedName: types.NamespacedName{ 144 | Name: "dummy", 145 | Namespace: "dummy", 146 | }, 147 | } 148 | res, err := r.Reconcile(req) 149 | if err != nil { 150 | t.Fatalf("reconcile: (%v)", err) 151 | } 152 | if res != (reconcile.Result{}) { 153 | t.Error("reconcile should not requeue the request when the resource is not to be watched") 154 | } 155 | } 156 | 157 | func (r *ReconcilePod) testReconcileOnWatchedResource(t *testing.T) { 158 | // Mock request to simulate Reconcile() being called on an event for a 159 | // watched resource. 160 | req := reconcile.Request{ 161 | NamespacedName: types.NamespacedName{ 162 | Name: "nsx-ncp", 163 | Namespace: "nsx-system", 164 | }, 165 | } 166 | ncpDeployment := &appsv1.Deployment{ 167 | ObjectMeta: metav1.ObjectMeta{ 168 | Name: "nsx-ncp", 169 | Namespace: "nsx-system", 170 | }, 171 | } 172 | r.client.Create(context.TODO(), ncpDeployment) 173 | res, err := r.Reconcile(req) 174 | if err != nil { 175 | t.Fatalf("reconcile: (%v)", err) 176 | } 177 | if res.RequeueAfter != operatortypes.DefaultResyncPeriod { 178 | t.Fatalf("reconcile should requeue the request after %v", operatortypes.DefaultResyncPeriod) 179 | } 180 | r.client.Delete(context.TODO(), ncpDeployment) 181 | } 182 | 183 | func (r *ReconcilePod) testReconcileOnWatchedResourceWhenDeleted(t *testing.T) { 184 | originalSetControllerReferenceFunc := SetControllerReference 185 | originalApplyObject := ApplyObject 186 | defer func() { 187 | SetControllerReference = originalSetControllerReferenceFunc 188 | ApplyObject = originalApplyObject 189 | }() 190 | 191 | req := reconcile.Request{ 192 | NamespacedName: types.NamespacedName{ 193 | Name: "nsx-ncp", 194 | Namespace: "nsx-system", 195 | }, 196 | } 197 | ncpDeployment := &appsv1.Deployment{ 198 | ObjectMeta: metav1.ObjectMeta{ 199 | Name: "nsx-ncp", 200 | Namespace: "nsx-system", 201 | }, 202 | } 203 | SetControllerReference = func(owner, controlled metav1.Object, scheme 
*runtime.Scheme) error { 204 | return nil 205 | } 206 | ApplyObject = func(ctx context.Context, client k8sclient.Client, obj *unstructured.Unstructured) error { 207 | r.client.Create(context.TODO(), ncpDeployment) 208 | return nil 209 | } 210 | 211 | // Do not create nsx-ncp deployment so that it assumes it's deleted 212 | res, err := r.Reconcile(req) 213 | if err != nil { 214 | t.Fatalf("reconcile: (%v)", err) 215 | } 216 | if res.RequeueAfter != operatortypes.DefaultResyncPeriod { 217 | t.Fatalf("reconcile should requeue the request after %v", operatortypes.DefaultResyncPeriod) 218 | } 219 | 220 | // Validate that reconcile recreated the deployment 221 | instance := &appsv1.Deployment{} 222 | instanceDetails := types.NamespacedName{ 223 | Namespace: operatortypes.NsxNamespace, 224 | Name: operatortypes.NsxNcpDeploymentName, 225 | } 226 | err = r.client.Get(context.TODO(), instanceDetails, instance) 227 | if err != nil { 228 | t.Fatalf( 229 | "reconcile failed to/did not recreate ncp deployment: (%v)", err) 230 | } 231 | 232 | r.client.Delete(context.TODO(), ncpDeployment) 233 | } 234 | 235 | func (r *ReconcilePod) testReconcileOnCLBNsxNodeAgentInvalidResolvConf( 236 | t *testing.T) { 237 | c := r.client 238 | originalSetControllerReferenceFunc := SetControllerReference 239 | originalApplyObject := ApplyObject 240 | defer func() { 241 | SetControllerReference = originalSetControllerReferenceFunc 242 | ApplyObject = originalApplyObject 243 | }() 244 | SetControllerReference = func(owner, controlled metav1.Object, scheme *runtime.Scheme) error { 245 | return nil 246 | } 247 | ApplyObject = func(ctx context.Context, client k8sclient.Client, obj *unstructured.Unstructured) error { 248 | return nil 249 | } 250 | // Reconcile should NOT recreate nsx-node-agent pod if it's in CLB but not 251 | // because of invalid resolv.conf 252 | nodeAgentPod := &corev1.Pod{ 253 | ObjectMeta: metav1.ObjectMeta{ 254 | Name: "nsx-node-agent", 255 | Namespace: "nsx-system", 256 | Labels: 
map[string]string{ 257 | "component": "nsx-node-agent", 258 | }, 259 | }, 260 | Status: corev1.PodStatus{ 261 | ContainerStatuses: []corev1.ContainerStatus{ 262 | { 263 | Name: "nsx-node-agent", 264 | State: corev1.ContainerState{ 265 | Waiting: &corev1.ContainerStateWaiting{ 266 | Reason: "CrashLoopBackOff", 267 | }, 268 | }, 269 | }, 270 | }, 271 | }, 272 | } 273 | c.Create(context.TODO(), nodeAgentPod) 274 | oldGetContainerLogsInPod := getContainerLogsInPod 275 | defer func() { 276 | getContainerLogsInPod = oldGetContainerLogsInPod 277 | }() 278 | getContainerLogsInPod = func(pod *corev1.Pod, containerName string) ( 279 | string, error) { 280 | return "", nil 281 | } 282 | req := reconcile.Request{ 283 | NamespacedName: types.NamespacedName{ 284 | Name: "nsx-node-agent", 285 | Namespace: "nsx-system", 286 | }, 287 | } 288 | res, err := r.Reconcile(req) 289 | if err != nil { 290 | t.Fatalf("reconcile: (%v)", err) 291 | } 292 | if res.RequeueAfter != operatortypes.DefaultResyncPeriod { 293 | t.Fatalf("reconcile should requeue the request after %v but it did "+ 294 | "after %v", operatortypes.DefaultResyncPeriod, res.RequeueAfter) 295 | } 296 | obj := &corev1.Pod{} 297 | namespacedName := types.NamespacedName{ 298 | Name: "nsx-node-agent", 299 | Namespace: "nsx-system", 300 | } 301 | err = c.Get(context.TODO(), namespacedName, obj) 302 | if err != nil { 303 | t.Fatalf("failed to find nsx-node-agent pod") 304 | } 305 | 306 | // Reconcile should recreate nsx-node-agent pod if it's in CLB and 307 | // because of invaid resolv.conf 308 | nodeAgentPod = &corev1.Pod{ 309 | ObjectMeta: metav1.ObjectMeta{ 310 | Name: "nsx-node-agent", 311 | Namespace: "nsx-system", 312 | Labels: map[string]string{ 313 | "component": "nsx-node-agent", 314 | }, 315 | }, 316 | Status: corev1.PodStatus{ 317 | ContainerStatuses: []corev1.ContainerStatus{ 318 | { 319 | Name: "nsx-node-agent", 320 | State: corev1.ContainerState{ 321 | Waiting: &corev1.ContainerStateWaiting{ 322 | Reason: 
"CrashLoopBackOff", 323 | }, 324 | }, 325 | }, 326 | }, 327 | }, 328 | } 329 | c.Update(context.TODO(), nodeAgentPod) 330 | getContainerLogsInPod = func(pod *corev1.Pod, containerName string) ( 331 | string, error) { 332 | return "Failed to establish a new connection: [Errno -2] Name " + 333 | "or service not known", nil 334 | } 335 | res, err = r.Reconcile(req) 336 | if err != nil { 337 | t.Fatalf("reconcile: (%v)", err) 338 | } 339 | if res.RequeueAfter != operatortypes.DefaultResyncPeriod { 340 | t.Fatalf("reconcile should requeue the request after %v but it did "+ 341 | "after %v", operatortypes.DefaultResyncPeriod, res.RequeueAfter) 342 | } 343 | obj = &corev1.Pod{} 344 | err = c.Get(context.TODO(), namespacedName, obj) 345 | if !errors.IsNotFound(err) { 346 | t.Fatalf("failed to delete nsx-node-agent pod in CLB because of " + 347 | "invalid resolv.conf") 348 | } 349 | } 350 | 351 | func TestPodControllerReconcile(t *testing.T) { 352 | r := getTestReconcilePod("openshift4") 353 | r.testReconcileOnNotWatchedResource(t) 354 | r.testReconcileOnWatchedResource(t) 355 | r.testReconcileOnWatchedResourceWhenDeleted(t) 356 | r.testReconcileOnCLBNsxNodeAgentInvalidResolvConf(t) 357 | } 358 | 359 | func TestPodController_deletePods(t *testing.T) { 360 | c := fake.NewFakeClient() 361 | nodeAgentPod := &corev1.Pod{ 362 | ObjectMeta: metav1.ObjectMeta{ 363 | Name: "nsx-node-agent", 364 | Namespace: "nsx-system", 365 | }, 366 | } 367 | c.Create(context.TODO(), nodeAgentPod) 368 | deletePods([]corev1.Pod{*nodeAgentPod}, c) 369 | obj := &corev1.Pod{} 370 | namespacedName := types.NamespacedName{ 371 | Name: "nsx-node-agent", 372 | Namespace: "nsx-system", 373 | } 374 | err := c.Get(context.TODO(), namespacedName, obj) 375 | if !errors.IsNotFound(err) { 376 | t.Fatalf("failed to delete nsx-node-agent pod") 377 | } 378 | } 379 | 380 | func _test_no_labels(c k8sclient.Client, t *testing.T) { 381 | nodeAgentPod := &corev1.Pod{ 382 | ObjectMeta: metav1.ObjectMeta{ 383 | Name: 
"nsx-node-agent", 384 | Namespace: "nsx-system", 385 | }, 386 | } 387 | c.Create(context.TODO(), nodeAgentPod) 388 | podsInCLB, err := identifyPodsInCLBDueToInvalidResolvConf(c) 389 | 390 | if err != nil { 391 | t.Fatalf(fmt.Sprintf("Failed to identify Pods in CLB when there "+ 392 | "is none. Got error: %v", err)) 393 | } 394 | if len(podsInCLB) > 0 { 395 | t.Fatalf("Incorrect identification of pods in CLB. Identified " + 396 | "pods in CLB when there should be None.") 397 | } 398 | } 399 | 400 | func _test_normal_running(c k8sclient.Client, t *testing.T) { 401 | nodeAgentPod := &corev1.Pod{ 402 | ObjectMeta: metav1.ObjectMeta{ 403 | Name: "nsx-node-agent", 404 | Namespace: "nsx-system", 405 | Labels: map[string]string{ 406 | "component": "nsx-node-agent", 407 | }, 408 | }, 409 | Status: corev1.PodStatus{ 410 | ContainerStatuses: []corev1.ContainerStatus{ 411 | { 412 | Name: "nsx-node-agent", 413 | State: corev1.ContainerState{ 414 | Running: &corev1.ContainerStateRunning{ 415 | StartedAt: metav1.Now().Rfc3339Copy(), 416 | }, 417 | }, 418 | }, 419 | }, 420 | }, 421 | } 422 | c.Update(context.TODO(), nodeAgentPod) 423 | podsInCLB, err := identifyPodsInCLBDueToInvalidResolvConf(c) 424 | if err != nil { 425 | t.Fatalf(fmt.Sprintf("Failed to identify Pods in CLB when there "+ 426 | "is none. Got error: %v", err)) 427 | } 428 | if len(podsInCLB) > 0 { 429 | t.Fatalf("Incorrect identification of pods in CLB. 
Identified " + 430 | "pods in CLB when there should be None.") 431 | } 432 | } 433 | 434 | func _test_clb_but_not_invalid_resolv(c k8sclient.Client, t *testing.T) { 435 | nodeAgentPod := &corev1.Pod{ 436 | ObjectMeta: metav1.ObjectMeta{ 437 | Name: "nsx-node-agent", 438 | Namespace: "nsx-system", 439 | Labels: map[string]string{ 440 | "component": "nsx-node-agent", 441 | }, 442 | }, 443 | Status: corev1.PodStatus{ 444 | ContainerStatuses: []corev1.ContainerStatus{ 445 | { 446 | Name: "nsx-node-agent", 447 | State: corev1.ContainerState{ 448 | Waiting: &corev1.ContainerStateWaiting{ 449 | Reason: "CrashLoopBackOff", 450 | }, 451 | }, 452 | }, 453 | }, 454 | }, 455 | } 456 | c.Update(context.TODO(), nodeAgentPod) 457 | oldGetContainerLogsInPod := getContainerLogsInPod 458 | defer func() { 459 | getContainerLogsInPod = oldGetContainerLogsInPod 460 | }() 461 | getContainerLogsInPod = func(pod *corev1.Pod, containerName string) ( 462 | string, error) { 463 | return "", nil 464 | } 465 | podsInCLB, err := identifyPodsInCLBDueToInvalidResolvConf(c) 466 | if err != nil { 467 | t.Fatalf(fmt.Sprintf("Failed to identify Pods in CLB when there "+ 468 | "is none. Got error: %v", err)) 469 | } 470 | if len(podsInCLB) > 0 { 471 | t.Fatalf("Incorrect identification of pods in CLB. 
Identified " + 472 | "pods in CLB when there should be None.") 473 | } 474 | } 475 | 476 | func _test_clb_due_to_invalid_resolv(c k8sclient.Client, t *testing.T) { 477 | nodeAgentPod := &corev1.Pod{ 478 | ObjectMeta: metav1.ObjectMeta{ 479 | Name: "nsx-node-agent", 480 | Namespace: "nsx-system", 481 | Labels: map[string]string{ 482 | "component": "nsx-node-agent", 483 | }, 484 | }, 485 | Status: corev1.PodStatus{ 486 | ContainerStatuses: []corev1.ContainerStatus{ 487 | { 488 | Name: "nsx-node-agent", 489 | State: corev1.ContainerState{ 490 | Waiting: &corev1.ContainerStateWaiting{ 491 | Reason: "CrashLoopBackOff", 492 | }, 493 | }, 494 | }, 495 | }, 496 | }, 497 | } 498 | c.Update(context.TODO(), nodeAgentPod) 499 | oldGetContainerLogsInPod := getContainerLogsInPod 500 | defer func() { 501 | getContainerLogsInPod = oldGetContainerLogsInPod 502 | }() 503 | getContainerLogsInPod = func(pod *corev1.Pod, containerName string) ( 504 | string, error) { 505 | return "Failed to establish a new connection: [Errno -2] Name " + 506 | "or service not known", nil 507 | } 508 | podsInCLB, err := identifyPodsInCLBDueToInvalidResolvConf(c) 509 | if err != nil { 510 | t.Fatalf(fmt.Sprintf("Failed to identify Pods in CLB when there "+ 511 | "is none. Got error: %v", err)) 512 | } 513 | if len(podsInCLB) == 0 { 514 | t.Fatalf("Incorrect identification of pods in CLB. No pods " + 515 | "identified in CLB when expected.") 516 | } 517 | } 518 | 519 | func TestPodController_identifyPodsInCLBDueToInvalidResolvConf( 520 | t *testing.T) { 521 | c := fake.NewFakeClient() 522 | 523 | _test_no_labels(c, t) 524 | _test_normal_running(c, t) 525 | _test_clb_but_not_invalid_resolv(c, t) 526 | _test_clb_due_to_invalid_resolv(c, t) 527 | } 528 | -------------------------------------------------------------------------------- /pkg/controller/sharedinfo/shared_info.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. 
All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package sharedinfo 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | "time" 10 | 11 | "gopkg.in/ini.v1" 12 | 13 | configv1 "github.com/openshift/api/config/v1" 14 | operatorv1 "github.com/vmware/nsx-container-plugin-operator/pkg/apis/operator/v1" 15 | operatortypes "github.com/vmware/nsx-container-plugin-operator/pkg/types" 16 | corev1 "k8s.io/api/core/v1" 17 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 18 | "k8s.io/apimachinery/pkg/types" 19 | "k8s.io/apimachinery/pkg/util/version" 20 | clientset "k8s.io/client-go/kubernetes" 21 | "k8s.io/client-go/rest" 22 | logf "sigs.k8s.io/controller-runtime/pkg/log" 23 | "sigs.k8s.io/controller-runtime/pkg/manager" 24 | ) 25 | 26 | var log = logf.Log.WithName("shared_info") 27 | 28 | type SharedInfo struct { 29 | AdaptorName string 30 | AddNodeTag bool 31 | PodSecurityPolicySupport bool 32 | LastNodeAgentStartTime map[string]time.Time 33 | NetworkConfig *configv1.Network 34 | OperatorConfigMap *corev1.ConfigMap 35 | OperatorNsxSecret *corev1.Secret 36 | 37 | NsxNodeAgentDsSpec *unstructured.Unstructured 38 | NsxNcpBootstrapDsSpec *unstructured.Unstructured 39 | NsxNcpDeploymentSpec *unstructured.Unstructured 40 | } 41 | 42 | func getAdaptorName() (string, error) { 43 | cfg, err := ini.Load(operatortypes.OsReleaseFile) 44 | if err != nil { 45 | log.Error(err, fmt.Sprintf("Failed to load os-release from %s.", operatortypes.OsReleaseFile)) 46 | return "", err 47 | } 48 | if cfg.Section("").Key("ID").String() == "rhcos" { 49 | return "openshift4", nil 50 | } 51 | return "kubernetes", nil 52 | } 53 | 54 | func New(mgr manager.Manager, operatorNamespace string) (*SharedInfo, error) { 55 | reader := mgr.GetAPIReader() 56 | watchedNamespace := operatorNamespace 57 | if watchedNamespace == "" { 58 | log.Info(fmt.Sprintf("SharedInfo can only check a single namespace, defaulting to: %s", 59 | operatortypes.OperatorNamespace)) 60 | watchedNamespace = 
operatortypes.OperatorNamespace 61 | } 62 | adaptorName, err := getAdaptorName() 63 | log.Info(fmt.Sprintf("adaptor name: %s", adaptorName)) 64 | if err != nil { 65 | return nil, err 66 | } 67 | ncpinstallName := types.NamespacedName{ 68 | Name: operatortypes.NcpInstallCRDName, 69 | Namespace: watchedNamespace, 70 | } 71 | ncpInstall := &operatorv1.NcpInstall{} 72 | err = reader.Get(context.TODO(), ncpinstallName, ncpInstall) 73 | if err != nil { 74 | log.Error(err, "Failed to get ncp-install") 75 | return nil, err 76 | } 77 | // The default value is true 78 | addNodeTag := true 79 | if ncpInstall.Spec.AddNodeTag == false { 80 | addNodeTag = false 81 | } 82 | 83 | podSecurityPolicySupport := isPodSecurityPolicySupport(mgr.GetConfig()) 84 | 85 | return &SharedInfo{ 86 | AdaptorName: adaptorName, AddNodeTag: addNodeTag, 87 | PodSecurityPolicySupport: podSecurityPolicySupport, 88 | }, nil 89 | } 90 | 91 | // PodSecurityPolicy resource is not supported any longer starting k8s >= v1.25.0 92 | func isPodSecurityPolicySupport(c *rest.Config) bool { 93 | version125, _ := version.ParseGeneric("v1.25.0") 94 | 95 | clientset, err := clientset.NewForConfig(c) 96 | if err != nil { 97 | log.Error(err, "failed to create clientset") 98 | return false 99 | } 100 | 101 | serverVersion, err := clientset.Discovery().ServerVersion() 102 | if err != nil { 103 | log.Error(err, "failed to get server Kubernetes version") 104 | return false 105 | } 106 | 107 | runningVersion, err := version.ParseGeneric(serverVersion.String()) 108 | if err != nil { 109 | log.Error(err, fmt.Sprintf("unexpected error parsing server Kubernetes version %s", runningVersion.String())) 110 | return false 111 | } 112 | 113 | log.Info(fmt.Sprintf("running server version is %s", runningVersion.String())) 114 | return runningVersion.LessThan(version125) 115 | } 116 | -------------------------------------------------------------------------------- /pkg/controller/statusmanager/node_status.go: 
-------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package statusmanager 5 | 6 | import ( 7 | "fmt" 8 | "strings" 9 | ) 10 | 11 | type NodeStatus struct { 12 | Addresses []string 13 | Success bool 14 | Reason string 15 | } 16 | 17 | func (status *StatusManager) SetFromNodes(cachedNodeSet map[string]*NodeStatus) { 18 | status.Lock() 19 | defer status.Unlock() 20 | messages := []string{} 21 | allProcessesDone := true 22 | for nodeName, _ := range cachedNodeSet { 23 | if !cachedNodeSet[nodeName].Success { 24 | messages = append(messages, cachedNodeSet[nodeName].Reason) 25 | allProcessesDone = false 26 | } 27 | } 28 | if allProcessesDone { 29 | status.setNotDegraded(ClusterNode) 30 | } else { 31 | message := strings.Join(messages, "\n") 32 | log.Info(fmt.Sprintf("Setting degraded status %s", message)) 33 | status.setDegraded(ClusterNode, "ProcessClusterNodeError", message) 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /pkg/controller/statusmanager/status_manager.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 
SPDX-License-Identifier: Apache-2.0 */

// code from https://github.com/openshift/cluster-network-operator/blob/bfc8b01b1ec4d7e5b0cd6423fe75daef945c3cbe/pkg/controller/statusmanager/status_manager.go

package statusmanager

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"sync"

	"github.com/ghodss/yaml"

	configv1 "github.com/openshift/api/config/v1"
	"github.com/openshift/library-go/pkg/config/clusteroperator/v1helpers"
	operatorv1 "github.com/vmware/nsx-container-plugin-operator/pkg/apis/operator/v1"
	"github.com/vmware/nsx-container-plugin-operator/pkg/controller/sharedinfo"
	operatortypes "github.com/vmware/nsx-container-plugin-operator/pkg/types"
	"github.com/vmware/nsx-container-plugin-operator/version"

	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/util/retry"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// StatusLevel identifies the independent failure domains tracked by the
// StatusManager; one degraded condition slot exists per level.
type StatusLevel int

const (
	ClusterConfig StatusLevel = iota
	OperatorConfig
	PodDeployment
	RolloutHung
	ClusterNode
	maxStatusLevel // sentinel: number of levels, not a real level
)

// Adaptor abstracts the platform-specific pieces of status reporting:
// persisting pod state and writing the combined conditions (ClusterOperator
// on OpenShift, ncp-install CR only on vanilla Kubernetes).
type Adaptor interface {
	getLastPodState(status *StatusManager) (map[types.NamespacedName]daemonsetState, map[types.NamespacedName]deploymentState)
	setLastPodState(status *StatusManager, dss map[types.NamespacedName]daemonsetState, deps map[types.NamespacedName]deploymentState) error
	set(status *StatusManager, reachedAvailableLevel bool, conditions ...configv1.ClusterOperatorStatusCondition)
}

// Status coordinates changes to ClusterOperator.Status
type StatusManager struct {
	sync.Mutex // guards failing, daemonSets and deployments

	client  client.Client
	mapper  meta.RESTMapper
	name    string // ClusterOperator object name
	version string

	// failing holds at most one degraded condition per StatusLevel; a nil
	// slot means that level is currently healthy.
	failing [maxStatusLevel]*configv1.ClusterOperatorStatusCondition

	daemonSets  []types.NamespacedName
	deployments []types.NamespacedName

	OperatorNamespace string
	AdaptorName       string
	Adaptor // embedded platform adaptor (StatusOc or StatusK8s)
}

type Status struct {}

// StatusK8s is the vanilla-Kubernetes adaptor (no ClusterOperator resource).
type StatusK8s struct {
	Status
}

// StatusOc is the OpenShift adaptor (writes the ClusterOperator resource).
type StatusOc struct {
	Status
}

// GetOperatorNamespace returns the namespace the operator runs in.
func (status *StatusManager) GetOperatorNamespace() string {
	return status.OperatorNamespace
}

// setConditions translates a list of in-progress messages into Progressing/
// Available conditions and delegates the write to the platform adaptor.
// Callers must hold the StatusManager lock.
func (status *StatusManager) setConditions(progressing []string, reachedAvailableLevel bool) {
	conditions := make([]configv1.ClusterOperatorStatusCondition, 0, 2)
	if len(progressing) > 0 {
		conditions = append(conditions,
			configv1.ClusterOperatorStatusCondition{
				Type:    configv1.OperatorProgressing,
				Status:  configv1.ConditionTrue,
				Reason:  "Deploying",
				Message: strings.Join(progressing, "\n"),
			},
		)
	} else {
		conditions = append(conditions,
			configv1.ClusterOperatorStatusCondition{
				Type:   configv1.OperatorProgressing,
				Status: configv1.ConditionFalse,
			},
		)
	}
	if reachedAvailableLevel {
		conditions = append(conditions,
			configv1.ClusterOperatorStatusCondition{
				Type:   configv1.OperatorAvailable,
				Status: configv1.ConditionTrue,
			},
		)
	}

	status.set(status, reachedAvailableLevel, conditions...)
}

// New constructs a StatusManager and selects the platform adaptor from the
// shared AdaptorName ("openshift4" -> StatusOc, otherwise StatusK8s).
func New(client client.Client, mapper meta.RESTMapper, name, version string, operatorNamespace string, sharedInfo *sharedinfo.SharedInfo) *StatusManager {
	status := StatusManager{
		client:            client,
		mapper:            mapper,
		name:              name,
		version:           version,
		OperatorNamespace: operatorNamespace,
		AdaptorName:       sharedInfo.AdaptorName,
	}
	if sharedInfo.AdaptorName == "openshift4" {
		status.Adaptor = &StatusOc{}
	} else {
		status.Adaptor = &StatusK8s{}
	}
	return &status
}

// Set updates the ClusterOperator.Status with the provided conditions
// (Kubernetes variant: conditions are combined in-memory on a transient
// ClusterOperator object and persisted only to the ncp-install CR status).
func (adaptor *StatusK8s) set(status *StatusManager, reachedAvailableLevel bool, conditions ...configv1.ClusterOperatorStatusCondition) {
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		ncpInstall := &operatorv1.NcpInstall{}
		err := status.client.Get(context.TODO(), types.NamespacedName{Name: operatortypes.NcpInstallCRDName, Namespace: operatortypes.OperatorNamespace}, ncpInstall)
		if err != nil {
			log.Error(err, "Failed to get ncpInstall")
			return err
		}
		// NOTE(review): co is a fresh in-memory object, never fetched from
		// the API server; it only serves as a scratch pad for condition
		// merging here.
		co := &configv1.ClusterOperator{ObjectMeta: metav1.ObjectMeta{Name: status.name}}

		oldStatus := ncpInstall.Status.DeepCopy()

		if reachedAvailableLevel {
			co.Status.Versions = []configv1.OperandVersion{
				{Name: "operator", Version: version.Version},
			}
		}
		status.CombineConditions(&co.Status.Conditions, &conditions)

		progressingCondition := v1helpers.FindStatusCondition(co.Status.Conditions, configv1.OperatorProgressing)
		availableCondition := v1helpers.FindStatusCondition(co.Status.Conditions, configv1.OperatorAvailable)
		if availableCondition == nil && progressingCondition != nil && progressingCondition.Status == configv1.ConditionTrue {
			v1helpers.SetStatusCondition(&co.Status.Conditions,
				configv1.ClusterOperatorStatusCondition{
					Type:    configv1.OperatorAvailable,
					Status:  configv1.ConditionFalse,
					Reason:  "Startup",
					Message: "The network is starting up",
				},
			)
		}

		// NOTE(review): this compares an NcpInstallStatus against a
		// ClusterOperatorStatus — different types, so DeepEqual is always
		// false and the early return never fires. Likely a copy-paste from
		// the Oc variant; confirm intent before changing.
		if reflect.DeepEqual(*oldStatus, co.Status) {
			return nil
		}

		// Set status to ncp-install CRD
		err = status.setNcpInstallCrdStatus(status.OperatorNamespace, &co.Status.Conditions)
		return err
	})
	if err != nil {
		log.Error(err, "Failed to set NcpInstall")
	}
}

// Set updates the ClusterOperator.Status with the provided conditions
// (OpenShift variant: creates/updates the ClusterOperator object, then
// mirrors the conditions into the ncp-install CR status).
func (adaptor *StatusOc) set(status *StatusManager, reachedAvailableLevel bool, conditions ...configv1.ClusterOperatorStatusCondition) {
	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
		co := &configv1.ClusterOperator{ObjectMeta: metav1.ObjectMeta{Name: status.name}}
		err := status.client.Get(context.TODO(), types.NamespacedName{Name: status.name}, co)
		isNotFound := errors.IsNotFound(err)
		if err != nil && !isNotFound {
			return err
		}

		oldStatus := co.Status.DeepCopy()

		if reachedAvailableLevel {
			co.Status.Versions = []configv1.OperandVersion{
				{Name: "operator", Version: version.Version},
			}
		}
		status.CombineConditions(&co.Status.Conditions, &conditions)

		progressingCondition := v1helpers.FindStatusCondition(co.Status.Conditions, configv1.OperatorProgressing)
		availableCondition := v1helpers.FindStatusCondition(co.Status.Conditions, configv1.OperatorAvailable)
		if availableCondition == nil && progressingCondition != nil && progressingCondition.Status == configv1.ConditionTrue {
			v1helpers.SetStatusCondition(&co.Status.Conditions,
				configv1.ClusterOperatorStatusCondition{
					Type:    configv1.OperatorAvailable,
					Status:  configv1.ConditionFalse,
					Reason:  "Startup",
					Message: "The network is starting up",
				},
			)
		}

		// The operator is always upgradeable on OpenShift.
		v1helpers.SetStatusCondition(&co.Status.Conditions,
			configv1.ClusterOperatorStatusCondition{
				Type:   configv1.OperatorUpgradeable,
				Status: configv1.ConditionTrue,
			},
		)

		// Nothing changed: skip the API write.
		if reflect.DeepEqual(*oldStatus, co.Status) {
			return nil
		}

		// YAML rendering is for logging only; a marshal failure is not fatal.
		buf, err := yaml.Marshal(co.Status.Conditions)
		if err != nil {
			buf = []byte(fmt.Sprintf("(failed to convert to YAML: %s)", err))
		}
		if isNotFound {
			if err := status.client.Create(context.TODO(), co); err != nil {
				return err
			}
			log.Info(fmt.Sprintf("Created ClusterOperator with conditions:\n%s", string(buf)))
			// NOTE(review): on the create path the ncp-install CR status is
			// NOT updated (unlike the update path below) — confirm intent.
			return nil
		}
		if err := status.client.Status().Update(context.TODO(), co); err != nil {
			return err
		}
		log.Info(fmt.Sprintf("Updated ClusterOperator with conditions:\n%s", string(buf)))
		// Set status to ncp-install CRD
		err = status.setNcpInstallCrdStatus(status.OperatorNamespace, &co.Status.Conditions)
		return err
	})
	if err != nil {
		log.Error(err, "Failed to set ClusterOperator")
	}
}

// syncDegraded syncs the current Degraded status
// (first non-nil failing slot wins; otherwise Degraded=False is written).
func (status *StatusManager) syncDegraded() {
	for _, c := range status.failing {
		if c != nil {
			status.set(status, false, *c)
			return
		}
	}
	status.set(
		status,
		false,
		configv1.ClusterOperatorStatusCondition{
			Type:   configv1.OperatorDegraded,
			Status: configv1.ConditionFalse,
		},
	)
}

// setDegraded records a degraded condition for one level and re-syncs.
// Callers must hold the StatusManager lock.
func (status *StatusManager) setDegraded(statusLevel StatusLevel, reason, message string) {
	status.failing[statusLevel] = &configv1.ClusterOperatorStatusCondition{
		Type:    configv1.OperatorDegraded,
		Status:  configv1.ConditionTrue,
		Reason:  reason,
		Message: message,
	}
	status.syncDegraded()
}

// setNotDegraded clears the degraded condition for one level and re-syncs.
// Callers must hold the StatusManager lock.
func (status *StatusManager) setNotDegraded(statusLevel StatusLevel) {
	if status.failing[statusLevel] != nil {
		status.failing[statusLevel] = nil
	}
	status.syncDegraded()
}

// SetDegraded is the lock-taking public wrapper around setDegraded.
func (status *StatusManager) SetDegraded(statusLevel StatusLevel, reason, message string) {
	status.Lock()
	defer status.Unlock()
	status.setDegraded(statusLevel, reason, message)
}

// SetNotDegraded is the lock-taking public wrapper around setNotDegraded.
func (status *StatusManager) SetNotDegraded(statusLevel StatusLevel) {
	status.Lock()
	defer status.Unlock()
	status.setNotDegraded(statusLevel)
}

// SetDaemonSets replaces the list of daemonsets whose rollout is tracked.
func (status *StatusManager) SetDaemonSets(daemonSets []types.NamespacedName) {
	status.Lock()
	defer status.Unlock()
	status.daemonSets = daemonSets
}

// SetDeployments replaces the list of deployments whose rollout is tracked.
func (status *StatusManager) SetDeployments(deployments []types.NamespacedName) {
	status.Lock()
	defer status.Unlock()
	status.deployments = deployments
}

// setNcpInstallCrdStatus merges the given conditions into the ncp-install
// CR's status and updates it via the status subresource; it is a no-op when
// CombineConditions reports no change.
func (status *StatusManager) setNcpInstallCrdStatus(operatorNamespace string, conditions *[]configv1.ClusterOperatorStatusCondition) error {
	crd := &operatorv1.NcpInstall{}
	err := status.client.Get(context.TODO(), types.NamespacedName{Name: operatortypes.NcpInstallCRDName,
		Namespace: operatorNamespace}, crd)
	if err != nil {
		log.Error(err, "Failed to get ncp-install CRD")
		return err
	}
	changed, messages := status.CombineConditions(&crd.Status.Conditions, conditions)
	if !changed {
		return nil
	}
	log.Info(fmt.Sprintf("Trying to update ncp-install CRD with condition %s", messages))
	err = status.client.Status().Update(context.TODO(), crd)
	if err != nil {
		log.Error(err, "Failed to update ncp-install CRD status")
	} else {
		log.Info("Updated ncp-install CRD status")
	}
	return err
}

--------------------------------------------------------------------------------
/pkg/controller/statusmanager/test_utils.go:
/* Copyright © 2020 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0 */

package statusmanager

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// FakeRESTMapper is a minimal meta.RESTMapper test double: KindFor records
// its input and returns a fixed "test" GVK; every other interface method is
// a no-op stub returning zero values.
type FakeRESTMapper struct {
	// kindForInput holds the last resource passed to KindFor so tests can
	// assert on what was requested.
	kindForInput schema.GroupVersionResource
}

// KindFor records the requested resource and returns a fixed test GVK.
func (f *FakeRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
	f.kindForInput = resource
	return schema.GroupVersionKind{
		Group:   "test",
		Version: "test",
		Kind:    "test"}, nil
}

// KindsFor is a stub; it always returns (nil, nil).
func (f *FakeRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
	return nil, nil
}

// ResourceFor is a stub; it always returns a zero GVR and no error.
func (f *FakeRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
	return schema.GroupVersionResource{}, nil
}

// ResourcesFor is a stub; it always returns (nil, nil).
func (f *FakeRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
	return nil, nil
}

// RESTMapping is a stub; it always returns (nil, nil).
func (f *FakeRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
	return nil, nil
}

// RESTMappings is a stub; it always returns (nil, nil).
func (f *FakeRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
	return nil, nil
}

// ResourceSingularizer is a stub; it always returns ("", nil).
func (f *FakeRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
	return "", nil
}
// ----- file: pkg/types/defaults.go -----
/* Copyright © 2020 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0 */

package types

const (
	// NcpDefaultReplicas is the replica count used for the nsx-ncp
	// Deployment when the operator config does not specify one.
	NcpDefaultReplicas int = 1
	// DefaultMTU is the fallback MTU value.
	DefaultMTU int = 1500
)

var (
	// NcpSections lists the ncp.ini sections rendered into the NCP configmap.
	NcpSections = []string{"DEFAULT", "ha", "k8s", "coe", "nsx_v3", "vc"}
	// OperatorSections lists every ncp.ini section the operator recognizes.
	OperatorSections = []string{"DEFAULT", "ha", "k8s", "coe", "nsx_v3", "vc", "nsx_node_agent", "nsx_kube_proxy"}
	// AgentSections are responsible for rendering nsx-node-agent configmap, while not all keys updates in section `coe` and
	// `k8s` require restart of nsx-node-agent. So AgentRestartSections are responsible for checking whether nsx-node-agent
	// should be restarted when related keys are updated.
	AgentSections        = []string{"DEFAULT", "k8s", "coe", "nsx_node_agent", "nsx_kube_proxy"}
	AgentRestartSections = []string{"DEFAULT", "nsx_node_agent", "nsx_kube_proxy"}
	// BootstrapRestartOptionKeys maps section name -> option keys whose
	// change requires restarting the nsx-ncp-bootstrap DaemonSet.
	BootstrapRestartOptionKeys = map[string][]string{
		"DEFAULT": {
			"log_dir", "log_file", "log_rotation_file_max_mb", "log_rotation_backup_count",
		},
		"nsx_node_agent": {
			"enable_ipv6", "use_nsx_ovs_kernel_module", "ovs_uplink_port", "mtu",
		},
	}
	// AgentRestartOptionKeys maps section name -> option keys whose change
	// requires restarting nsx-node-agent even though their sections are
	// outside AgentRestartSections.
	AgentRestartOptionKeys = map[string][]string{
		"k8s": {
			"apiserver_host_ip", "apiserver_host_port", "client_token_file", "ca_file", "enable_hostport_snat",
		},
		"coe": {"connect_retry_timeout"},
	}
)

// TASSection is the ncp.ini section used for TAS (Cloud Foundry) deployments.
var TASSection = string("cf")

// MPOptions lists nsx_v3 options specific to Management Plane mode.
var MPOptions = map[string][]string{
	"nsx_v3": {
		"top_firewall_section_marker", "bottom_firewall_section_marker",
	},
}

// WCPOptions lists options specific to WCP (vSphere with Tanzu) deployments,
// keyed by ncp.ini section.
var WCPOptions = map[string][]string{
	"nsx_v3": {
		"dlb_l4_persistence", "single_tier_sr_topology", "enforcement_point", "search_node_tag_on",
		"vif_app_id_type", "configure_t0_redistribution", "multi_t0",
	},
	"k8s": {
		"enable_vnet_crd", "enable_lb_monitor_crd", "enable_nsxnetworkconfig_crd", "enable_routeset_crd",
		"enable_ip_pool_crd", "enable_vm_crd", "lb_statistic_monitor_interval", "enable_lb_vs_statistics_monitor",
		"network_info_resync_period", "ip_usage_alarm_threshold",
	},
	"coe": {
		"node_type",
	},
	"vc": {"vc_endpoint", "sso_domain", "https_port"},
}
// ----- file: pkg/types/names.go -----
/* Copyright © 2020 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0 */

package types

import "time"

// Well-known names, namespaces, render keys, and file paths shared across
// the operator's controllers.
const (
	OperatorNamespace           string = "nsx-system-operator"
	ConfigMapName               string = "nsx-ncp-operator-config"
	OperatorRoleName            string = "nsx-ncp-operator"
	NcpInstallCRDName           string = "ncp-install"
	NetworkCRDName              string = "cluster"
	NsxNamespace                string = "nsx-system"
	NcpConfigMapName            string = "nsx-ncp-config"
	NodeAgentConfigMapName      string = "nsx-node-agent-config"
	ConfigMapDataKey            string = "ncp.ini"
	NcpConfigMapRenderKey       string = "NSXNCPConfig"
	NodeAgentConfigMapRenderKey string = "NSXNodeAgentConfig"
	NcpImageKey                 string = "NcpImage"
	NcpReplicasKey              string = "NcpReplicas"
	NcpNodeSelectorRenderKey    string = "NcpNodeSelector"
	NcpTolerationsRenderKey     string = "NcpTolerations"
	NsxNodeTolerationsRenderKey string = "NsxNodeAgentTolerations"
	NsxNodeAgentDsName          string = "nsx-node-agent"
	NsxNcpBootstrapDsName       string = "nsx-ncp-bootstrap"
	NsxNcpDeploymentName        string = "nsx-ncp"
	NetworkType                 string = "ncp"
	LbCertRenderKey             string = "LbCert"
	LbKeyRenderKey              string = "LbKey"
	LbSecret                    string = "lb-secret"
	NcpImageEnv                 string = "NCP_IMAGE"
	NsxCertRenderKey            string = "NsxCert"
	NsxKeyRenderKey             string = "NsxKey"
	NsxCARenderKey              string = "NsxCA"
	NsxSecret                   string = "nsx-secret"
	NsxCertTempPath             string = "/tmp/nsx.cert"
	NsxKeyTempPath              string = "/tmp/nsx.key"
	NsxCATempPath               string = "/tmp/nsx.ca"
	NsxNodeAgentContainerName   string = "nsx-node-agent"
	OsReleaseFile               string = "/host/etc/os-release"
	NsxOvsKmodRenderKey         string = "UseNsxOvsKmod"
	// TimeBeforeRecoverNetwork is how long to wait before network recovery
	// kicks in.
	TimeBeforeRecoverNetwork time.Duration = 180 * time.Second
	// DefaultResyncPeriod is the default controller resync interval.
	DefaultResyncPeriod time.Duration = 2 * time.Minute
)
// ----- file: pkg/types/utils.go -----
/* Copyright © 2021 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0 */

package types

import (
	"context"
	"fmt"
	"strings"

	"github.com/pkg/errors"
	"github.com/vmware/vsphere-automation-sdk-go/runtime/bindings"
	"github.com/vmware/vsphere-automation-sdk-go/runtime/data"
	appsv1 "k8s.io/api/apps/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// CheckIfNCPK8sResourceExists infers the k8s object from resName and checks
// if it exists
// in the NsxNamespace. Returns (false, nil) when the object is simply not
// found; any other Get error is returned to the caller.
func CheckIfNCPK8sResourceExists(
	c client.Client, resName string) (bool, error) {
	instance, err := identifyAndGetInstance(resName)
	if err != nil {
		return false, err
	}
	instanceDetails := types.NamespacedName{
		Namespace: NsxNamespace,
		Name:      resName,
	}
	err = c.Get(context.TODO(), instanceDetails, instance)
	if err != nil {
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

// identifyAndGetInstance maps a known NCP resource name to an empty object
// of the matching kind (DaemonSet for the bootstrap/node-agent DaemonSets,
// Deployment for nsx-ncp); unknown names produce an error.
func identifyAndGetInstance(resName string) (runtime.Object, error) {
	if resName == NsxNcpBootstrapDsName || resName == NsxNodeAgentDsName {
		return &appsv1.DaemonSet{}, nil
	} else if resName == NsxNcpDeploymentName {
		return &appsv1.Deployment{}, nil
	}
	return nil, errors.Errorf("failed to identify instance for: %s", resName)
}

// CastToBindingType converts an NSX SDK data.StructValue into the Go type T
// described by destBindingType. The first conversion error (if any) is
// returned; a successful conversion that yields a value of the wrong dynamic
// type is also reported as an error.
func CastToBindingType[T any](dataValue *data.StructValue, destBindingType bindings.BindingType) (T, error) {
	var result T
	converter := bindings.NewTypeConverter()
	obj, errs := converter.ConvertToGolang(dataValue, destBindingType)
	if len(errs) > 0 {
		return result, errs[0]
	}
	result, ok := obj.(T)
	if !ok {
		return result, fmt.Errorf("cast to bind type failed: %v is not of type %T", obj, result)
	}
	return result, nil
}

// ExtractSegmentIdFromPath returns everything after the first
// "/infra/segments/" marker in an NSX policy path, or an error when the
// marker is absent.
// NOTE(review): a path that ends exactly in "/infra/segments/" yields an
// empty ID with a nil error — confirm callers tolerate that.
func ExtractSegmentIdFromPath(segmentPath string) (string, error) {
	segments := strings.Split(segmentPath, "/infra/segments/")
	if len(segments) > 1 {
		return segments[len(segments)-1], nil
	}
	return "", fmt.Errorf("unable to find the Segment ID from provided path: %s", segmentPath)
}
// ----- file: pkg/types/utils_test.go -----
/* Copyright © 2021 VMware, Inc. All Rights Reserved.
2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package types 5 | 6 | import ( 7 | "context" 8 | "reflect" 9 | "testing" 10 | 11 | appsv1 "k8s.io/api/apps/v1" 12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 13 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 14 | ) 15 | 16 | func TestUtils_identifyAndGetInstance(t *testing.T) { 17 | instance, err := identifyAndGetInstance( 18 | NsxNcpDeploymentName) 19 | if !reflect.DeepEqual(instance, &appsv1.Deployment{}) { 20 | t.Fatalf("nsx-ncp instance must be a Deployment") 21 | } 22 | instance, err = identifyAndGetInstance( 23 | NsxNcpBootstrapDsName) 24 | if !reflect.DeepEqual(instance, &appsv1.DaemonSet{}) { 25 | t.Fatalf("nsx-ncp instance must be a DaemonSet") 26 | } 27 | instance, err = identifyAndGetInstance( 28 | NsxNodeAgentDsName) 29 | if !reflect.DeepEqual(instance, &appsv1.DaemonSet{}) { 30 | t.Fatalf("nsx-ncp instance must be a DaemonSet") 31 | } 32 | instance, err = identifyAndGetInstance( 33 | "new-k8s-resource") 34 | if err == nil { 35 | t.Fatalf("identifyAndGetInstance: (%v)", err) 36 | } 37 | } 38 | 39 | func TestUtils_CheckIfNCPK8sResourceExists(t *testing.T) { 40 | c := fake.NewFakeClient() 41 | // NCP resource does not exist 42 | resExists, err := CheckIfNCPK8sResourceExists(c, NsxNodeAgentDsName) 43 | if err != nil { 44 | t.Fatalf("CheckIfNCPK8sResourceExists: (%v)", err) 45 | } 46 | if resExists { 47 | t.Fatalf("nsx-node-agent does not exist but client found it: %v", err) 48 | } 49 | 50 | // NCP resource exists 51 | ncpDeployment := &appsv1.Deployment{ 52 | ObjectMeta: metav1.ObjectMeta{ 53 | Name: "nsx-ncp", 54 | Namespace: "nsx-system", 55 | }, 56 | } 57 | c.Create(context.TODO(), ncpDeployment) 58 | resExists, err = CheckIfNCPK8sResourceExists(c, NsxNcpDeploymentName) 59 | if err != nil { 60 | t.Fatalf("CheckIfNCPK8sResourceExists: (%v)", err) 61 | } 62 | if !resExists { 63 | t.Fatalf("nsx-ncp exists but client could not find it: %v", err) 64 | } 65 | } 66 | 
-------------------------------------------------------------------------------- /tools.go: -------------------------------------------------------------------------------- 1 | // Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | // SPDX-License-Identifier: Apache-2.0 3 | 4 | // +build tools 5 | 6 | // Place any runtime dependencies as imports in this file. 7 | // Go modules will be forced to download and install them. 8 | package tools 9 | -------------------------------------------------------------------------------- /version/version.go: -------------------------------------------------------------------------------- 1 | /* Copyright © 2020 VMware, Inc. All Rights Reserved. 2 | SPDX-License-Identifier: Apache-2.0 */ 3 | 4 | package version 5 | 6 | const ( 7 | Version = "0.0.3" 8 | ) 9 | -------------------------------------------------------------------------------- /versioning.mk: -------------------------------------------------------------------------------- 1 | # check if git is available 2 | ifeq ($(shell which git),) 3 | $(warning git is not available, binaries will not include git SHA) 4 | GIT_SHA := 5 | GIT_TREE_STATE := 6 | GIT_TAG := 7 | VERSION_SUFFIX := unknown 8 | else 9 | GIT_SHA := $(shell git rev-parse --short HEAD) 10 | # Tree state is "dirty" if there are uncommitted changes, untracked files are ignored 11 | GIT_TREE_STATE := $(shell test -n "`git status --porcelain --untracked-files=no`" && echo "dirty" || echo "clean") 12 | # Empty string if we are not building a tag 13 | GIT_TAG := $(shell git describe --tags --abbrev=0 --exact-match 2>/dev/null) 14 | ifeq ($(GIT_TREE_STATE),dirty) 15 | VERSION_SUFFIX := $(GIT_SHA).dirty 16 | else 17 | VERSION_SUFFIX := $(GIT_SHA) 18 | endif 19 | endif 20 | 21 | ifndef VERSION 22 | VERSION := $(shell head -n 1 VERSION) 23 | DOCKER_IMG_VERSION := $(VERSION)-$(VERSION_SUFFIX) 24 | else 25 | DOCKER_IMG_VERSION := $(VERSION) 26 | endif 27 | 28 | VERSION_LDFLAGS = -X 
github.com/vmware/nsx-container-plugin-operator/pkg/version.Version=$(VERSION) 29 | VERSION_LDFLAGS += -X github.com/vmware/nsx-container-plugin-operator/pkg/version.GitSHA=$(GIT_SHA) 30 | VERSION_LDFLAGS += -X github.com/vmware/nsx-container-plugin-operator/pkg/version.GitTreeState=$(GIT_TREE_STATE) 31 | 32 | 33 | version-info: 34 | @echo "===> Version information <===" 35 | @echo "VERSION: $(VERSION)" 36 | @echo "GIT_SHA: $(GIT_SHA)" 37 | @echo "GIT_TREE_STATE: $(GIT_TREE_STATE)" 38 | @echo "DOCKER_IMG_VERSION: $(DOCKER_IMG_VERSION)" 39 | --------------------------------------------------------------------------------